# Load packages, source functions, set plot parameters, get behavioral and DIC dfs
# Packages required throughout this document; attached in one shot below.
packages_to_load <- c(
"dplyr",
"tidyverse",
"latex2exp",
"purrr",
"tidyr",
"rlang",
"patchwork",
"haven",
"RWiener",
"psych",
"data.table",
"lme4",
"lmerTest",
"foreach",
"beset",
"faux"
)
# vapply (rather than sapply) guarantees a named logical vector regardless of
# input; printing it records which packages attached successfully (TRUE/FALSE).
vapply(packages_to_load, require, logical(1), character.only=TRUE)
## Loading required package: dplyr
##
## Attaching package: 'dplyr'
## The following objects are masked from 'package:stats':
##
## filter, lag
## The following objects are masked from 'package:base':
##
## intersect, setdiff, setequal, union
## Loading required package: tidyverse
## ── Attaching packages ─────────────────────────────────────── tidyverse 1.3.1 ──
## ✓ ggplot2 3.3.5 ✓ purrr 0.3.4
## ✓ tibble 3.1.6 ✓ stringr 1.4.0
## ✓ tidyr 1.2.0 ✓ forcats 0.5.1
## ✓ readr 2.1.2
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## x dplyr::filter() masks stats::filter()
## x dplyr::lag() masks stats::lag()
## Loading required package: latex2exp
## Loading required package: rlang
##
## Attaching package: 'rlang'
## The following objects are masked from 'package:purrr':
##
## %@%, as_function, flatten, flatten_chr, flatten_dbl, flatten_int,
## flatten_lgl, flatten_raw, invoke, splice
## Loading required package: patchwork
## Loading required package: haven
## Loading required package: RWiener
## Loading required package: psych
##
## Attaching package: 'psych'
## The following objects are masked from 'package:ggplot2':
##
## %+%, alpha
## Loading required package: data.table
##
## Attaching package: 'data.table'
## The following object is masked from 'package:rlang':
##
## :=
## The following object is masked from 'package:purrr':
##
## transpose
## The following objects are masked from 'package:dplyr':
##
## between, first, last
## Loading required package: lme4
## Loading required package: Matrix
##
## Attaching package: 'Matrix'
## The following objects are masked from 'package:tidyr':
##
## expand, pack, unpack
## Loading required package: lmerTest
##
## Attaching package: 'lmerTest'
## The following object is masked from 'package:lme4':
##
## lmer
## The following object is masked from 'package:stats':
##
## step
## Loading required package: foreach
##
## Attaching package: 'foreach'
## The following objects are masked from 'package:purrr':
##
## accumulate, when
## Loading required package: beset
##
## Attaching package: 'beset'
## The following object is masked from 'package:psych':
##
## r2d
## Loading required package: faux
##
## ************
## Welcome to faux. For support and examples visit:
## https://debruine.github.io/faux/
## - Get and set global package options with: faux_options()
## ************
##
## Attaching package: 'faux'
## The following object is masked from 'package:rlang':
##
## %||%
## The following object is masked from 'package:purrr':
##
## %||%
## dplyr tidyverse latex2exp purrr tidyr rlang patchwork
## TRUE TRUE TRUE TRUE TRUE TRUE TRUE
## haven RWiener psych data.table lme4 lmerTest foreach
## TRUE TRUE TRUE TRUE TRUE TRUE TRUE
## beset faux
## TRUE TRUE
# Source every helper module in ./Modules (these define SetPlotPars(), and
# presumably GetSubjTraces(), InvLogit(), and the ggplot theme objects
# ga/ap/lp/tol/tp/ft used below -- TODO confirm against the module files).
# list.files(full.names = TRUE) replaces the manual paste0() path construction,
# and fs is defined before the function that closes over it.
fs <- list.files("./Modules", full.names = TRUE)
sf <- function() sapply(fs, source)
sf()
## ./Modules/GenPPCs.R ./Modules/GenPPCs2.R ./Modules/Utils.R
## value ? ? ?
## visible FALSE FALSE FALSE
SetPlotPars()
8/2/22 - Replaced old paths with public-access data paths throughout this document wherever the data are shareable (leaving out the raw trace files because they are too big).
# Get main SRET_CTL df..
# (trial-level behavioral data; columns used below include subj_idx, session,
# valence, response, rt, and the centered regressors val_ctr / sess_ctr)
#bx_df <- read.csv("./../../data/cleaned_files/s_bdf.csv")
bx_df <- read.csv("./Public_Data/s_bdf.csv")
# .. and df with some extra info to put in
# (treatment assignments; original private file also held FFMQ/MAAS scores)
#extra_df_full <- read.csv("./../../data/raw_files/tx_cond+ffmq+maas_from_KW_6.24.2020.csv")
extra_df_full <- read.csv("./Public_Data/tx_assignments.csv")
4/24/22 - See non-public directory for demographics
DDM Vis
# Settings for the schematic DDM trajectory figure.
starting_point <- 0.05  # evidence level at which every trajectory begins
non_dec_time <- c(0.1)  # non-decision time (kept as a vector; max() is taken below)
threshold <- 0          # extra separation added to the upper decision bound
decision_bounds <- c(max(non_dec_time), max(non_dec_time) + 1 + threshold)
# 101 evenly spaced time points spanning the decision window.
decision_time <- seq(decision_bounds[1], decision_bounds[2],
                     by = (decision_bounds[2] - decision_bounds[1]) / 100)
CreateDriftTrajectories <- function(upper_drift, lower_drift, n_trajects, label,
                                    sd_drift=.02, mean_sd=.01) {
  ### Simulate n_trajects drift trajectories ###
  # upper_drift / lower_drift: mean per-step increments pulling toward the
  #   upper / lower bound; their sum is the effective drift.
  # n_trajects: number of trajectories; label: drift_type tag for plotting.
  # sd_drift: per-step noise SD.
  # mean_sd: between-trajectory jitter of the drift means (was hard-coded as
  #   .01; now a backward-compatible parameter with the same default).
  # Relies on globals starting_point and decision_time defined above.
  # Returns long data frame: trajectory, drift_type, step_index, run.
  dfs <- list()
  n_steps <- length(decision_time)
  for (nt in seq_len(n_trajects)) {
    this_upper_drift <- rnorm(1, upper_drift, mean_sd)
    this_lower_drift <- rnorm(1, lower_drift, mean_sd)
    traject <- numeric(n_steps)  # preallocate instead of growing in the loop
    for (dec_ts in seq_len(n_steps)) {
      # Both increments are drawn on every step -- including step 1, where they
      # go unused -- to keep the RNG stream identical to the original code.
      y_incr <- rnorm(1, this_upper_drift, sd=sd_drift)
      n_incr <- rnorm(1, this_lower_drift, sd=sd_drift)
      incr_total <- y_incr + n_incr
      if (dec_ts == 1) traject[dec_ts] <- starting_point
      if (dec_ts > 1) traject[dec_ts] <- traject[dec_ts-1] + incr_total
    }
    # Clamp runaway trajectories to the plotting range [-1, 1]
    traject <- pmin(pmax(traject, -1), 1)
    dfs[[nt]] <- setNames(data.frame(traject, label, decision_time, nt),
                          c("trajectory", "drift_type", "step_index", "run"))
  }
  bind_rows(dfs)
}
# Simulate trajectory bundles for three drift conditions. NB: each call draws
# from the global RNG stream, so the call order matters for exact reproduction.
drift_1 <- CreateDriftTrajectories(.06, -.01, 12, "high", sd_drift=.02)
drift_2 <- CreateDriftTrajectories(.06, -.03, 12, "medium", sd_drift=.02)
drift_3 <- CreateDriftTrajectories(.03, -.07, 12, "low", sd_drift=.02)
all_drifts <- rbind(drift_1, drift_2, drift_3)
# Order the factor so colors/legend read high -> medium -> low
all_drifts$drift_type <- factor(all_drifts$drift_type, levels=c("high", "medium", "low"))
# Shaded rectangles marking the non-decision-time region of the schematic
rect <- data.frame(xmin=0, xmax=seq(non_dec_time, .2, .033), ymin=-1, ymax=1)
# Schematic DDM figure: one line per simulated trajectory, faded horizontal
# lines for the decision bounds, and segments for the non-decision period.
# ga / ap are theme objects from the sourced modules -- TODO confirm.
drift_sample <- ggplot(all_drifts, aes(step_index, trajectory, group=interaction(run, drift_type))) +
geom_line(size = 2, alpha=.5, aes(color=drift_type)) +
# Stacked, progressively fading bound lines above/below +-1
geom_hline(yintercept=c(-seq(1, 1.6, .1), seq(1, 1.6, .1)),
alpha=c(rev(seq(.4, 1, .1)), rev(seq(.4, 1, .1))), color="gray57", size=2) +
# Non-decision-time segments at the starting point and two reference heights
geom_segment(aes(x=0, xend=non_dec_time, y=.05, yend=.05),
inherit.aes = FALSE, color="black", size = 2) +
geom_segment(aes(x=0, xend=non_dec_time, y=.2, yend=.2),
color="gray57", size = 2) +
geom_segment(aes(x=0, xend=non_dec_time, y=-.10, yend=-.10),
inherit.aes = FALSE, color="gray57", size = 2) +
ga + ap + theme(legend.text = element_text(size = 18),
legend.title=element_text(size=20, face = "bold"),
legend.key.size = unit(1.5, 'lines')) +
scale_color_manual(name=" drift rate",
values = c("green", "orange", "red")) +
xlab("time") + ylab("") +
#ylim(0, -2) +
geom_rect(data = rect, aes(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax),
alpha = .1, inherit.aes = FALSE) +
# xlim(0, xlim_ub) +
theme(axis.text=element_blank()) + theme(axis.ticks=element_blank()) #+
drift_sample
#ggsave("../paper/figs/drift_sample.png", drift_sample, width=11, height=6, dpi=700)
Simulating densities — commented out because it takes a long time to run.
# a <- 1.93
# t <- .9
# b <- .5
#
# #high_drift_samples <-
# diff_drift_samples <-
# rbind(
# data.frame(rwiener(5e4, alpha=a, tau=t, beta=b, delta =.7), type="high"),
# data.frame(rwiener(5e4, alpha=a, tau=t, beta=b, delta =.04), type="medium"),
# data.frame(rwiener(5e4, alpha=a, tau=t, beta=b, delta = -.7), type="low"))
#
# diff_drift_samples[diff_drift_samples$resp=="lower", "q"] <- -diff_drift_samples[diff_drift_samples$resp=="lower", "q"]
#
# #diff_drift_samples$resp=="lower"
# test_diff_plot <- ggplot(diff_drift_samples, aes(x=q, color=type)) +
# geom_density(size=3.5) + ga + ap + tol +
# scale_color_manual(
# values = c("green", "orange", "red")) +
# theme(axis.text = element_blank(), axis.ticks = element_blank()) + xlab("") + ylab("")
# test_diff_plot
#ggsave("../paper/figs/diff_drift_dists_resize.png", test_diff_plot, width=35, height=3)
GetMapEsts <- function(s_df, format="long") {
### Returns dataframe of Maximum a Posteriori estimates for the variables in a dataframe.
# s_df must be a subject-level dataframe (ie. excluding group-level estimates) ###
# format: "long" -> one row per (ID, var); "short" -> one row per ID with a
# column per variable. Column names are assumed to look like "<var>subj.<ID>"
# -- TODO confirm against the trace files.
# Handle entry of dfs other than subject-only traces
# NOTE(review): "subj." is a regex here, so the "." matches any character.
if (!all(grepl("subj.", names(s_df)))) stop("Must enter a trace data-frame with ONLY
subject-level vars")
# Posterior mean (kept for spot-checking MAP estimates; see comments below)
col_means <- colMeans(s_df)
# Map est
if (format == "long") {
# One row per column: split the column name on "subj." into var and ID,
# then take the MAP estimate of the posterior samples.
mdf <- foreach(i = 1:ncol(s_df)) %do% {
this_par <- s_df[, i]
mdf_row <- data.table(
"ID"=unlist(map(strsplit(names(s_df)[i], "subj."), 2)),
"var"=unlist(map(strsplit(names(s_df)[i], "subj."), 1)),
"map_est"=as.numeric(bayestestR::map_estimate(this_par))
)
mdf_row
} %>% bind_rows()
# Repair IDs of "..trans" columns (transformed z) by recycling the clean IDs
z_IDs <- grep("trans", mdf$ID)
if (length(z_IDs)) {
mdf[grepl("trans", mdf$ID), "ID"] <- unique(mdf$ID[!grepl("trans", mdf$ID)])
}
}
if (format == "short") {
# Put in short format with each variable as a column name
# First extract the unique vars that will serve as column names
unique_vars <- unique(unlist(map(strsplit(names(col_means), "subj."), 1)))
unique_IDs <- unique(unlist(map(strsplit(names(col_means), "subj."), 2)))
# Remove putative IDs with "trans" appended in case of z
fake_IDs <- grep("trans", unique_IDs)
if (length(fake_IDs)) unique_IDs <- unique_IDs[-c(fake_IDs)]
# MAP estimate per column, in column order; relies on columns being grouped
# by variable so the matrix() reshape below lines up -- TODO confirm.
map_est <- unlist(foreach(i = 1:ncol(s_df)) %do% {
as.numeric(bayestestR::map_estimate(s_df[, i]))
})
mdf <-
setNames(data.frame(
# Assign the ID as first column..
unique_IDs,
# ..and split into columns for each new variable
matrix(map_est, ncol=length(unique_vars))
),
# Assign column names via setNames
c("ID", unique_vars))
}
# NOTE(review): if any "trans" IDs survived to this point, as.numeric() would
# coerce them to NA with a warning.
mdf$ID <- as.numeric(as.character(mdf$ID))
# NOTE(review): after the numeric coercion above, grep("trans", ...) cannot
# match, so this repeat of the long-format repair looks like dead code.
z_IDs <- grep("trans", mdf$ID)
if (length(z_IDs)) {
mdf[grepl("trans", mdf$ID), "ID"] <- unique(mdf$ID[!grepl("trans", mdf$ID)])
}
# Spot checks that estimates are v similar
#mdf[550:560]
# col_means[550:560]
# mdf$map_est[180:190]
# col_means[180:190]
# mdf$map_est[1:10]
# col_means[1:10]
mdf
}
These files are too big to upload, but the MAP estimates derived from them are in the Public_Data dir — see below
# Thin wrapper around read.csv() so every trace file is read the same way.
ReadTrace <- function(unique_str) {
  read.csv(unique_str)
}
# Four independent MCMC chains of the winning model (subject-level traces)
ndgr1 <- ReadTrace("../../model_res/final_traces_and_dics/traces/GR_run_also8k_ddm_add_trialwise_NO-SZ_s_vt_poutlier052177.csv")
ndgr2 <- ReadTrace("../../model_res/final_traces_and_dics/traces/GR_run_also8k_ddm_add_trialwise_NO-SZ_s_vt_poutlier052923.csv")
ndgr3 <- ReadTrace("../../model_res/final_traces_and_dics/traces/GR_run_also8k_ddm_add_trialwise_NO-SZ_s_vt_poutlier056294.csv")
ndgr4 <- ReadTrace("../../model_res/final_traces_and_dics/traces/GR_run_also8k_ddm_add_trialwise_NO-SZ_s_vt_poutlier058876.csv")
# Pool the chains into one combined posterior for downstream summaries
d1c <- rbind(ndgr1, ndgr2, ndgr3, ndgr4)
Convergence checks — the R-hats below all look fine.
# Gelman-Rubin R-hat for every parameter, computed across the 4 chains.
# Column 1 is skipped -- presumably an iteration/index column, TODO confirm.
rhats <- unlist(foreach (i = 2:ncol(ndgr1)) %do% rstan::Rhat(cbind(
unlist(ndgr1[i]),
unlist(ndgr2[i]),
unlist(ndgr3[i]),
unlist(ndgr4[i]))))
hist(rhats)
# No parameter exceeds the conventional 1.1 cutoff
rhats[which(rhats > 1.1)]
## numeric(0)
max(rhats)
## [1] 1.021465
# recovered <-
# read.csv("../../model_res/par_recov/HDDM_par_recov_traces_v-val_ddm_add_trialwise_NO-SZ_s_vt_with-z_1239.csv")
# Parameter-recovery traces: posterior samples fit to data simulated from
# known parameter values..
recovered <-
read.csv("./Public_Data/HDDM_par_recov_traces_v-val_ddm_add_trialwise_NO-SZ_s_vt_with-z_1239.csv")
# ..and the simulated trials themselves, carrying the generative (sim_*) values
#simmed <- read.csv("../../model_res/par_recov/HDDM_sims_for_pr_add_trialwise_NO-SZ_s_vt_poutlier059513.csv")
simmed <- read.csv("./Public_Data/HDDM_sims_for_pr_add_trialwise_NO-SZ_s_vt_poutlier059513.csv")
The across-trial variability parameters were run at the group level, so for these we’ll look at the range of means used to generate alongside the recovered posterior
# Per-subject generative sv values (constant within subject, so mean == value)
generative_sv <- data.frame(simmed %>% group_by(subj_idx) %>% summarize(msv=mean(sim_sv)))
# Recovered group-level sv posterior (density) with the generative per-subject
# values (small points) and their grand mean (large point) overlaid at y=.2
sv_plot <- ggplot(recovered, aes(x=sv)) +
geom_point(data=generative_sv, aes(x=msv, y=.2), size=3, pch=21, fill="gray57", color="black", alpha=.3) +
geom_point(data=generative_sv, aes(x=mean(msv), y=.2), size=5, pch=21, fill="gray57", color="black", alpha=.7) +
geom_density(color="gray57", fill="white", size=3, alpha=0) +
ga + lp +
xlab("") + ylab("") +
labs(title="Across-trial drift rate") +
#+ #ylab("Higher values = more pos. > neg.") +
theme(axis.text.x = element_text(size=25), axis.ticks=element_blank(), axis.text.y=element_blank()) +
theme(plot.title = element_text(margin=margin(b=-30),
size=12, hjust=.05, vjust=0.1))
sv_plot
# Add st
generative_st <- data.frame(simmed %>% group_by(subj_idx) %>% summarize(mst=mean(sim_st)))
st_plot <- ggplot(recovered, aes(x=st)) +
geom_point(data=generative_st, aes(x=mst, y=.2), size=3, pch=21, fill="gray57", color="black", alpha=.3) +
geom_point(data=generative_st, aes(x=mean(mst), y=.2), size=5, pch=21, fill="gray57", color="black", alpha=.7) +
geom_density(color="gray57", fill="white", size=3, alpha=0) +
ga + lp +
xlab("") + ylab("") +
labs(title="Across-trial non-decision time") +
#+ #ylab("Higher values = more pos. > neg.") +
theme(axis.text.x = element_text(size=25), axis.ticks=element_blank(), axis.text.y=element_blank()) +
theme(plot.title = element_text(margin=margin(b=-30),
size=12, hjust=.05, vjust=0.1))
st_plot
For the rest of the parameters we had subject-level estimates, and we examine the MAP estimates of the recovered traces against the generative subject-level parameters
PlotParRecov <- function(sim_df, opt_df, sim_name, rec_name, parameter_label=NULL) {
# Scatter a recovered (MAP) parameter against its generative value, with the
# identity line and the Pearson r in the panel title.
#   sim_df: per-subject generative values (one row per subject)
#   opt_df: per-subject recovered estimates, row-aligned with sim_df
#   sim_name / rec_name: column names to pull from sim_df / opt_df
#   parameter_label: panel subtitle
# BUG FIX: the original read `gen_vals[sim_name]` -- the global -- instead of
# the sim_df argument; it only worked because every call site passed gen_vals.
sim_df$simulated <- unlist(sim_df[sim_name])
sim_df$recovered <- unlist(opt_df[rec_name])
cor_res <- cor.test(sim_df$simulated, sim_df$recovered)
# Shared axis limits so the identity line is the visual reference
lower_lim <- min(sim_df$simulated, sim_df$recovered)
upper_lim <- max(sim_df$simulated, sim_df$recovered)
pr_p <-
ggplot(sim_df, aes(x=simulated, y=recovered)) +
geom_line(aes(x=simulated, y=simulated), size=2) +
geom_point(size=7, alpha=.7, pch=21, fill="gray57") +
ga + ap +
labs(title=paste("r =", round(cor_res$estimate, 2)),
subtitle=parameter_label) +
theme(plot.subtitle = element_text(size = 25)) +
theme(axis.text = element_text(size = 15)) +
tp +
xlim(lower_lim, upper_lim) + ylim(lower_lim, upper_lim)
pr_p
}
# Per-subject means of every generative (sim_*) parameter column
gen_vals <- simmed %>% group_by(subj_idx) %>% summarize_at(
names(simmed)[grep("sim", names(simmed))], mean
)
# MAP estimates of the recovered subject-level traces, one row per subject
rec_map <- GetMapEsts(GetSubjTraces(recovered), "short")
rec_map$conv_z <- InvLogit(rec_map$z_) # Transform recov z back on generative scale
# Generative-vs-recovered correlation per subject-level parameter.
# FIX: spell out $estimate (the original relied on partial matching via $est)
# and preallocate instead of growing the vector element by element.
ests <- numeric(7)
ests[1] <- cor.test(gen_vals$sim_a, rec_map$a_)$estimate
ests[2] <- cor.test(gen_vals$sim_t, rec_map$t_)$estimate
ests[3] <- cor.test(gen_vals$sim_z, rec_map$conv_z)$estimate
ests[4] <- cor.test(gen_vals$sim_vi, rec_map$v_Intercept_)$estimate
ests[5] <- cor.test(gen_vals$sim_vv, rec_map$v_val_ctr_)$estimate
ests[6] <- cor.test(gen_vals$sim_vs, rec_map$v_sess_ctr_)$estimate
ests[7] <- cor.test(gen_vals$sim_vsv, rec_map$v_val_ctr.sess_ctr_)$estimate
ests
## [1] 0.9665570 0.9608423 0.8139605 0.9264843 0.9706376 0.8341225 0.9384575
median(ests)
## [1] 0.9384575
range(ests)
## [1] 0.8139605 0.9706376
# One recovery scatter per parameter, guarded by an ID-alignment check between
# the generative and recovered tables
pr_plots <- list()
if (all(rec_map$ID==gen_vals$subj_idx)) {
pr_plots[[1]] <- PlotParRecov(gen_vals, rec_map, "sim_a", "a_", "Threshold (a)")
pr_plots[[2]] <- PlotParRecov(gen_vals, rec_map, "sim_t", "t_", "Non-decision time (t)")
pr_plots[[3]] <- PlotParRecov(gen_vals, rec_map, "sim_z", "conv_z", "Starting point bias (z)")
pr_plots[[4]] <- PlotParRecov(gen_vals, rec_map, "sim_vi", "v_Intercept_", "Drift rate intercept")
pr_plots[[5]] <- PlotParRecov(gen_vals, rec_map, "sim_vv", "v_val_ctr_", "Drift rate valence")
pr_plots[[6]] <- PlotParRecov(gen_vals, rec_map, "sim_vs", "v_sess_ctr_", "Drift rate time")
pr_plots[[7]] <- PlotParRecov(gen_vals, rec_map, "sim_vsv", "v_val_ctr.sess_ctr_", "Drift rate valence * time")
}
# patchwork 3x2 grid; the 7th (odd-one-out) panel is saved separately
pr_grid <- (pr_plots[[1]] + pr_plots[[2]]) /
(pr_plots[[3]] + pr_plots[[4]]) /
(pr_plots[[5]] + pr_plots[[6]])
odd_pr_out <- pr_plots[[7]]
pr_grid
#ggsave("../paper/figs/pr-grid_SUPP.png", pr_grid, width=15, height=11, dpi=800)
odd_pr_out
#ggsave("../paper/figs/odd-pr_SUPP.png", odd_pr_out, width=7.5, height=11/3, dpi=800)
# Combine the two group-level recovery panels (sv, st) under one title
across_trial <- sv_plot + st_plot + plot_annotation(
"Recovered group posterior and generative parameters (mean and individual)",
theme = theme(plot.title = element_text(size = 15)))
across_trial
#ggsave("../paper/figs/across-trial_pr-SUPP.png", across_trial, width=8, height=4)
# Order sessions for plotting/analysis
bx_df$session <- factor(bx_df$session, levels=c("Pre", "Post"))
# Cell means of endorsement by valence x session..
pre_post_vends <- bx_df %>%
group_by(valence, session) %>%
summarize(mr=mean(response))
## `summarise()` has grouped output by 'valence'. You can override using the
## `.groups` argument.
# ..and the same broken out per subject (for jittered points and CIs)
pre_post_vends_id <- bx_df %>%
group_by(valence, session, subj_idx) %>%
summarize(mr=mean(response))
## `summarise()` has grouped output by 'valence', 'session'. You can override
## using the `.groups` argument.
pre_post_vends_id
## # A tibble: 384 × 4
## # Groups: valence, session [4]
## valence session subj_idx mr
## <chr> <fct> <int> <dbl>
## 1 negative Pre 1 0.556
## 2 negative Pre 2 0.393
## 3 negative Pre 3 0.233
## 4 negative Pre 4 0.103
## 5 negative Pre 6 0.148
## 6 negative Pre 7 0.407
## 7 negative Pre 8 0.423
## 8 negative Pre 9 0.467
## 9 negative Pre 10 0.286
## 10 negative Pre 11 0
## # … with 374 more rows
# 95% CI half-width per cell: qnorm(.975) * SD of subject means / sqrt(N).
# NOTE(review): N is the full-sample subject count, which assumes every subject
# contributes to every cell -- TODO confirm.
pp_sds <- pre_post_vends_id %>% group_by(valence, session) %>%
summarize(ci95=qnorm(.975)*sd(mr)/sqrt(length(unique(bx_df$subj_idx))))
## `summarise()` has grouped output by 'valence'. You can override using the
## `.groups` argument.
# Merge the CI column only after confirming the cell rows line up
if (all(pp_sds[1:2] == pre_post_vends[1:2])) pre_post_vends$ci95 <- pp_sds$ci95
# Sanity check
#length(unique(bx_df$subj_idx))
# pre_post_vends_id %>% filter(valence == "negative" & session == "Pre") %>%
# select(mr) %>% summarize(sd(mr))/sqrt(96)*qnorm(.975)
#qnorm(.975)
#pre_post_vends_id <- bx_df %>% group_by(valence, session, subj_idx) %>% summarize(mr=mean(response))
# Endorsement proportions: per-subject jittered points, cell means connected by
# valence, and 95% CI error bars.
# NOTE(review): pch is not a geom_line parameter -- it produces the
# "Ignoring unknown parameters: shape" warning recorded below.
ends_plot <- ggplot(pre_post_vends_id, aes(x=session, y=mr, group=session, fill=valence)) +
geom_jitter(pch=21, color="black", width=.16, size=3, alpha=.2) +
geom_line(data=pre_post_vends, aes(y=mr, group=valence), color="gray57", pch=21, size=1.5) +
geom_errorbar(data=pre_post_vends, aes(x=session,
ymin=mr-ci95, ymax=mr+ci95), color="black", inherit.aes=FALSE, size=1.5, width=.25) +
geom_point(data=pre_post_vends, aes(y=mr, fill=valence), pch=21, size=6, alpha=.8) +
ylab("proportion endorsements \n (with 95% CI)") + xlab("") +
scale_fill_manual(values=c("red", "blue")) +
ga + ap + tol
## Warning: Ignoring unknown parameters: shape
ends_plot
#ggsave("../paper/figs/ends_plot.png", ends_plot, width=11, height=7)
# Same construction as the endorsement summaries, for reaction times
pre_post_rts <- bx_df %>% group_by(valence, session) %>% summarize(mr=mean(rt))
## `summarise()` has grouped output by 'valence'. You can override using the
## `.groups` argument.
pre_post_rts_id <- bx_df %>% group_by(valence, session, subj_idx) %>% summarize(mr=mean(rt))
## `summarise()` has grouped output by 'valence', 'session'. You can override
## using the `.groups` argument.
# 95% CI half-widths per cell (same full-sample N assumption as above)
pprt_sds <- pre_post_rts_id %>% group_by(valence, session) %>%
summarize(ci95=qnorm(.975)*sd(mr)/sqrt(length(unique(bx_df$subj_idx))))
## `summarise()` has grouped output by 'valence'. You can override using the
## `.groups` argument.
if (all(pprt_sds[1:2] == pre_post_rts[1:2])) pre_post_rts$ci95 <- pprt_sds$ci95
# RT version of ends_plot (same layers; pch in geom_line again triggers the
# "Ignoring unknown parameters: shape" warning below)
rts_plot <- ggplot(pre_post_rts_id, aes(x=session, y=mr, group=session, fill=valence)) +
geom_jitter(pch=21, color="black", width=.16, size=3, alpha=.2) +
geom_line(data=pre_post_rts, aes(y=mr, group=valence), color="gray57", pch=21, size=1.5) +
geom_errorbar(data=pre_post_rts, aes(x=session,
ymin=mr-ci95, ymax=mr+ci95), color="black", inherit.aes=FALSE, size=1.5, width=.25) +
geom_point(data=pre_post_rts, aes(y=mr, fill=valence), pch=21, size=6, alpha=.8) +
ylab("mean reaction time \n (with 95% CI)") + xlab("") +
scale_fill_manual(values=c("red", "blue")) +
ga + ap + tol
## Warning: Ignoring unknown parameters: shape
rts_plot
#ggsave("../paper/figs/rt_plot.png", rts_plot, width=11, height=7)
Regression models
#scale(bx_df$valence_n)
# Logistic mixed model: endorsement ~ valence x session, random intercept only
summary (m1 <- glmer(response ~ val_ctr*sess_ctr +
(1|subj_idx), data=bx_df, family="binomial", control = glmerControl(optimizer = "bobyqa")))
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: response ~ val_ctr * sess_ctr + (1 | subj_idx)
## Data: bx_df
## Control: glmerControl(optimizer = "bobyqa")
##
## AIC BIC logLik deviance df.resid
## 8184.8 8221.4 -4087.4 8174.8 11051
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -5.4241 -0.3809 0.2075 0.3592 5.1879
##
## Random effects:
## Groups Name Variance Std.Dev.
## subj_idx (Intercept) 0.2043 0.452
## Number of obs: 11056, groups: subj_idx, 96
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 0.08590 0.05474 1.569 0.117
## val_ctr 2.03324 0.03127 65.025 < 2e-16 ***
## sess_ctr -0.02974 0.02943 -1.011 0.312
## val_ctr:sess_ctr 0.17231 0.02944 5.852 4.85e-09 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) vl_ctr sss_ct
## val_ctr 0.038
## sess_ctr 0.065 -0.008
## vl_ctr:sss_ -0.004 0.120 0.072
# Multicollinearity check on the fixed effects (all VIFs ~1)
car::vif(m1)
## val_ctr sess_ctr val_ctr:sess_ctr
## 1.014929 1.005568 1.020222
# Singular - unsuprisingly bc there's just 2 levels of each per subj
# summary (m2 <- glmer(response ~ val_ctr*sess_ctr +
# (val_ctr + sess_ctr|subj_idx), data=bx_df, family="binomial", control = glmerControl(optimizer = "bobyqa")))
Using this one because it includes a sensible and important random effect and is not singular.
# Preferred endorsement model: adds a random valence slope per subject
summary (m3 <- glmer(response ~ val_ctr*sess_ctr +
(val_ctr|subj_idx), data=bx_df, family="binomial", control = glmerControl(optimizer = "bobyqa")))
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: response ~ val_ctr * sess_ctr + (val_ctr | subj_idx)
## Data: bx_df
## Control: glmerControl(optimizer = "bobyqa")
##
## AIC BIC logLik deviance df.resid
## 7885.0 7936.2 -3935.5 7871.0 11049
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.9890 -0.3662 0.1725 0.3344 4.9228
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## subj_idx (Intercept) 0.1699 0.4121
## val_ctr 0.4699 0.6855 -0.08
## Number of obs: 11056, groups: subj_idx, 96
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 0.07915 0.05488 1.442 0.149
## val_ctr 2.18484 0.07837 27.878 < 2e-16 ***
## sess_ctr -0.03211 0.02975 -1.080 0.280
## val_ctr:sess_ctr 0.18136 0.02976 6.094 1.1e-09 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) vl_ctr sss_ct
## val_ctr -0.039
## sess_ctr 0.073 -0.004
## vl_ctr:sss_ -0.005 0.051 0.069
# Multicollinearity check for m3 (all VIFs ~1)
car::vif(m3)
## val_ctr sess_ctr val_ctr:sess_ctr
## 1.00267 1.00484 1.00746
# Linear mixed model for RT: valence x session, random intercept only
summary (rm1 <- lmer(rt ~ val_ctr*sess_ctr +
(1|subj_idx), data=bx_df))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: rt ~ val_ctr * sess_ctr + (1 | subj_idx)
## Data: bx_df
##
## REML criterion at convergence: 12762.7
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.2574 -0.7110 -0.2027 0.5462 4.4470
##
## Random effects:
## Groups Name Variance Std.Dev.
## subj_idx (Intercept) 0.04649 0.2156
## Residual 0.17980 0.4240
## Number of obs: 11056, groups: subj_idx, 96
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 1.520e+00 2.237e-02 9.485e+01 67.954 <2e-16 ***
## val_ctr -3.703e-02 4.034e-03 1.096e+04 -9.180 <2e-16 ***
## sess_ctr -3.649e-02 4.033e-03 1.096e+04 -9.048 <2e-16 ***
## val_ctr:sess_ctr 5.267e-03 4.034e-03 1.096e+04 1.306 0.192
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) vl_ctr sss_ct
## val_ctr 0.001
## sess_ctr 0.000 0.001
## vl_ctr:sss_ 0.000 -0.001 0.006
# Multicollinearity check for rm1 (all VIFs ~1)
car::vif(rm1)
## val_ctr sess_ctr val_ctr:sess_ctr
## 1.000001 1.000031 1.000031
Used for same reason
# RT model with a random valence slope per subject (preferred, as with m3)
summary (rm2 <- lmer(rt ~ val_ctr*sess_ctr +
(val_ctr|subj_idx), data=bx_df))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: rt ~ val_ctr * sess_ctr + (val_ctr | subj_idx)
## Data: bx_df
##
## REML criterion at convergence: 12701.6
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.3659 -0.7089 -0.2026 0.5390 4.5891
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## subj_idx (Intercept) 0.046590 0.21585
## val_ctr 0.002408 0.04907 0.24
## Residual 0.177400 0.42119
## Number of obs: 11056, groups: subj_idx, 96
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 1.520e+00 2.239e-02 9.485e+01 67.901 < 2e-16 ***
## val_ctr -3.690e-02 6.414e-03 9.474e+01 -5.752 1.08e-07 ***
## sess_ctr -3.640e-02 4.007e-03 1.086e+04 -9.084 < 2e-16 ***
## val_ctr:sess_ctr 5.124e-03 4.007e-03 1.086e+04 1.279 0.201
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) vl_ctr sss_ct
## val_ctr 0.188
## sess_ctr 0.000 0.000
## vl_ctr:sss_ 0.000 -0.001 0.006
# BUG FIX: the VIFs for rm2 were never checked -- the original repeated
# car::vif(rm1) here (copy-paste slip; note the recorded output below is
# byte-identical to rm1's VIF output above).
car::vif(rm2)
## val_ctr sess_ctr val_ctr:sess_ctr
## 1.000001 1.000031 1.000031
# Model-comparison labels, ordered from simplest to fullest model
model_names <- c("baseline", "+ valence", "+ valence * time", "+ bias", "+ sv and st")
# DIC values, one per model, read from the public copies of the DIC files
dic_values <-
c(
# read.csv("./../../model_res/final_traces_and_dics/dic/s_b_389_dic.csv", header=FALSE)$V2,
# read.csv("./../../model_res/final_traces_and_dics/dic/s_b1_3987_dic.csv", header=FALSE)$V2,
# read.csv("./../../model_res/final_traces_and_dics/dic/s_vt_dic.csv", header=FALSE)$V2,
# read.csv("./../../model_res/final_traces_and_dics/dic/GR_8k_s_vt_poutlier058886dic.csv", header=FALSE)$V2,
# read.csv("./../../model_res/final_traces_and_dics/dic/GR_run_also8k_ddm_add_trialwise_NO-SZ_s_vt_poutlier056294dic.csv", header=FALSE)$V2)
read.csv("./Public_Data/dic/s_b_389_dic.csv", header=FALSE)$V2,
read.csv("./Public_Data/dic/s_b1_3987_dic.csv", header=FALSE)$V2,
read.csv("./Public_Data/dic/s_vt_dic.csv", header=FALSE)$V2,
read.csv("./Public_Data/dic/GR_8k_s_vt_poutlier058886dic.csv", header=FALSE)$V2,
read.csv("./Public_Data/dic/GR_run_also8k_ddm_add_trialwise_NO-SZ_s_vt_poutlier056294dic.csv", header=FALSE)$V2)
dic_df <- data.frame(model_names, dic_values)
# Delta-DIC relative to the baseline model (baseline itself set to 0)
dic_df$delta_dic <- c(0, dic_values[2:5]-dic_values[1])
#dic_df$delta_dic <- as.numeric(c(0, unlist(dic_df[2:nrow(dic_df), 1]) - unlist(dic_df[1, 1])))
# Fix the x-axis ordering for the bar plot
dic_df$model_names <- factor(dic_df$model_names, levels=c("baseline", "+ valence", "+ valence * time", "+ bias", "+ sv and st"))
# Bar plot of delta-DIC for the four non-baseline models (row 1 = baseline = 0
# is dropped)
dic_p <- ggplot(dic_df[2:5, ], aes(x=model_names, y=delta_dic)) +
geom_bar(stat="identity", color="black", fill="white", size=2) +
ga + ap + tol +
ylab(TeX("$\\Delta$ DIC")) +
xlab("") +
labs(title = "Model comparison",
subtitle = "Relative to baseline (no regressors) model") +
theme(plot.subtitle = element_text(size = 15)) +
#ggtitle("Model comparison relative to baseline") +
tp +
theme(axis.text.x = element_text(angle=30, hjust=1, size=20))#+ xlim(500, -1200)
dic_p
#ggsave("../paper/figs/dic.png", dic_p, width=8, height=6, dpi=600)
# Group-level posteriors of the drift valence (dark) and valence*time (light)
# regression weights from the pooled chains, with a reference line at 0
val_posteriors <- ggplot(d1c, aes(x=v_val_ctr)) +
geom_vline(xintercept=0, size=2) +
geom_density(color="chocolate", fill="white", alpha=1, size=3) +
geom_density(aes(x=v_val_ctr.sess_ctr), color="tan1", fill="white", alpha=1, size=3) +
xlim(-.1, 2) + #geom_vline(xintercept = 0, color="gray57", size=2)
ga + lp +
xlab("") + ylab("") + #ylab("Higher values = more pos. > neg.") +
labs(title="Drift valence (dark) and valence*time (light) \n regressor posteriors") +
theme(axis.text.x = element_text(size=35), axis.ticks=element_blank(), axis.text.y=element_blank()) +
theme(plot.title = element_text(margin=margin(b=-30),
size=30, hjust=.18, vjust=.6))
val_posteriors
#ggsave("../paper/figs/val_posteriors.png", val_posteriors, width=12, height=7)
# MAP of the valence weight, and a check that no posterior sample falls below 0
bayestestR::map_estimate(d1c$v_val_ctr)
## MAP Estimate: 1.33
which(d1c$v_val_ctr < 0)
## integer(0)
#hist(d1c$v_val_ctr)
# Same for the valence*time interaction weight
bayestestR::map_estimate(d1c$v_val_ctr.sess_ctr)
## MAP Estimate: 0.13
which(d1c$v_val_ctr.sess_ctr < 0)
## integer(0)
#hist(d1c$v_val_ctr.sess_ctr)
Positive and negative pre and differences therein
# Per-subject pre-treatment means by valence (endorsement m and RT mrt)..
nends_b <- bx_df %>% filter(session=="Pre", valence=="negative") %>%
group_by(subj_idx) %>% summarize(m=mean(response), mrt=mean(rt))
pends_b <- bx_df %>% filter(session=="Pre", valence=="positive") %>% group_by(subj_idx) %>%
summarize(m=mean(response), mrt=mean(rt))
# ..and positive-minus-negative differences, merged only if rows align by subject
if (all(pends_b$subj_idx==nends_b$subj_idx)) {
pends_b$pn_diff <- pends_b$m-nends_b$m
pends_b$pn_rt_diff <- pends_b$mrt-nends_b$mrt
}
# pends_b$mrt
# nends_b$mrt
RTs
# Per-subject overall RT (m and mrt are both mean(rt) -- duplicated columns)
rt <- bx_df %>%
group_by(subj_idx) %>% summarize(m=mean(rt), mrt=mean(rt))
# Per-subject RT by valence
rt_val <- bx_df %>%
group_by(subj_idx, valence) %>% summarize(mrt=mean(rt))
## `summarise()` has grouped output by 'subj_idx'. You can override using the
## `.groups` argument.
# Positive-minus-negative RT difference per subject.
# NOTE(review): the subtraction of two one-column data frames yields a column
# named "mrt", so the intended name "overall_rt_diff" is silently NOT applied
# -- later code accesses this as rt_val_short$mrt. Verify before renaming.
rt_val_short <- data.frame(
"ID"=unique(rt_val$subj_idx),
"overall_rt_diff"=rt_val[rt_val$valence == "positive", "mrt"]-rt_val[rt_val$valence == "negative", "mrt"])
Endorsements at post and differences from pre to post
# Post-treatment versions of the per-subject valence means and differences
nends_p <- bx_df %>% filter(session=="Post", valence=="negative") %>%
group_by(subj_idx) %>% summarize(m=mean(response), mrt=mean(rt))
pends_p <- bx_df %>% filter(session=="Post", valence=="positive") %>% group_by(subj_idx) %>% summarize(m=mean(response), mrt=mean(rt))
if (all(pends_p$subj_idx==nends_p$subj_idx)) {
pends_p$pn_diff <- pends_p$m-nends_p$m
pends_p$pn_rt_diff <- pends_p$mrt-nends_p$mrt
}
# Subject-level MAP estimates from the pooled chains, in short/wide format
d1m <- GetMapEsts(GetSubjTraces(d1c), "short")
The traces are massive, so they cannot be put in the public data folder; however, the MAP estimates have been uploaded — these can be read in to reproduce the further analyses.
#write.csv(d1m, "./Public_Data/short_form_map_ests.csv")
# Read the public copy (overwrites the version computed from the raw traces
# above, so the rest of the document runs without the trace files)
d1m <- read.csv("./Public_Data/short_form_map_ests.csv")
Write out (commented) and read back the MAP estimates.
# Recompute per-subject overall RT (identical to the earlier `rt` computation)
rt <- bx_df %>%
group_by(subj_idx) %>% summarize(m=mean(rt), mrt=mean(rt))
# Correlate DDM drift parameters with behavioral summaries, only after
# verifying subject alignment across all three tables
if (all(d1m$ID==pends_p$subj_idx) & all(d1m$ID == pends_b$subj_idx)) {
cat("\n DDM baseline ends"); print(cor.test(d1m$v_val_ctr_, pends_b$pn_diff))
cat("\n DDM baseline rt"); print(cor.test(d1m$v_val_ctr_, pends_b$pn_rt_diff))
cat("\n DDM change ends");print(cor.test(d1m$v_val_ctr.sess_ctr_, pends_p$pn_diff-pends_b$pn_diff))
cat("\n DDM change rt"); print(cor.test(d1m$v_val_ctr.sess_ctr_, pends_p$pn_rt_diff-pends_b$pn_rt_diff))
}
##
## DDM baseline ends
## Pearson's product-moment correlation
##
## data: d1m$v_val_ctr_ and pends_b$pn_diff
## t = 12.389, df = 94, p-value < 2.2e-16
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.6971071 0.8532895
## sample estimates:
## cor
## 0.787528
##
##
## DDM baseline rt
## Pearson's product-moment correlation
##
## data: d1m$v_val_ctr_ and pends_b$pn_rt_diff
## t = -0.61816, df = 94, p-value = 0.538
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.2607878 0.1386257
## sample estimates:
## cor
## -0.06362865
##
##
## DDM change ends
## Pearson's product-moment correlation
##
## data: d1m$v_val_ctr.sess_ctr_ and pends_p$pn_diff - pends_b$pn_diff
## t = 8.11, df = 94, p-value = 1.876e-12
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.5062421 0.7461185
## sample estimates:
## cor
## 0.6416083
##
##
## DDM change rt
## Pearson's product-moment correlation
##
## data: d1m$v_val_ctr.sess_ctr_ and pends_p$pn_rt_diff - pends_b$pn_rt_diff
## t = 0.61594, df = 94, p-value = 0.5394
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.1388492 0.2605754
## sample estimates:
## cor
## 0.06340168
# This is the overall RT valence RT averaging over time point
# BUG FIX: in the original `if (...) cat(...); print(...)`, only the cat() was
# guarded by the if -- the print() ran unconditionally, even if the subject IDs
# were misaligned. Brace the block so the alignment check guards both.
if (all(d1m$ID == rt_val_short$ID)) {
  cat("\n DDM val rt")
  print(cor.test(d1m$v_val_ctr_, rt_val_short$mrt))
}
##
## DDM val rt
##
## Pearson's product-moment correlation
##
## data: d1m$v_val_ctr_ and rt_val_short$mrt
## t = 0.53113, df = 94, p-value = 0.5966
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.1474027 0.2524175
## sample estimates:
## cor
## 0.05469969
# BUG FIX: brace the guard -- originally only the cat() was inside the if();
# the print() of the correlation test ran even if the IDs were misaligned.
if (all(d1m$ID == rt$subj_idx)) {
  cat("\n DDM threshold - rt")
  print(cor.test(d1m$a_, rt$m))
}
##
## DDM threshold - rt
##
## Pearson's product-moment correlation
##
## data: d1m$a_ and rt$m
## t = 9.2603, df = 94, p-value = 6.877e-15
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.5690004 0.7827845
## sample estimates:
## cor
## 0.6906943
# Alias the behavioral data for the PPC section
s_bdf <- bx_df
# Posterior predictive simulations from the winning model
#m1_ddm_ppcs <- read.csv("../../model_res/ppcs/HDDM_ppcs_v-val_ddm_add_trialwise_NO-SZ_s_vt_with-z_9533.csv")
m1_ddm_ppcs <- read.csv("./Public_Data/HDDM_ppcs_v-val_ddm_add_trialwise_NO-SZ_s_vt_with-z_9533.csv")
Recode response
# Recode the centered regressors back into labeled factors.
# NOTE(review): rows with sess_ctr == 0 or val_ctr == 0 (if any exist) would be
# left NA by these sign-based recodes -- TODO confirm none exist.
m1_ddm_ppcs[m1_ddm_ppcs$sess_ctr < 0, "session"] <- "Pre"
m1_ddm_ppcs[m1_ddm_ppcs$sess_ctr > 0, "session"] <- "Post"
m1_ddm_ppcs$session <- factor(m1_ddm_ppcs$session, levels=c("Pre", "Post"))
m1_ddm_ppcs[m1_ddm_ppcs$val_ctr > 0, "valence"] <- "positive"
m1_ddm_ppcs[m1_ddm_ppcs$val_ctr < 0, "valence"] <- "negative"
m1_ddm_ppcs$valence <- factor(m1_ddm_ppcs$valence, levels=c("negative", "positive"))
#qp_no_t$response <- factor(qp_no_t$response, levels=c("no", "yes"))
# Quick look at the simulated RT distribution
hist(m1_ddm_ppcs$rt, breaks = 100, xlim = c(0, 4))
Quantiles look pretty good
# Empirical RT quantiles (behavioral data)
quantile(s_bdf$rt, probs = seq(0.05, .95, .1))
## 5% 15% 25% 35% 45% 55% 65% 75%
## 0.901915 1.045560 1.151572 1.255880 1.360530 1.483183 1.627107 1.799898
## 85% 95%
## 2.042533 2.460470
# Model-predicted RT quantiles, for comparison against the empirical ones
quantile(m1_ddm_ppcs$rt, probs = seq(0.05, .95, .1))
## 5% 15% 25% 35% 45% 55% 65% 75%
## 0.8895856 1.0505985 1.1580293 1.2532260 1.3480761 1.4507999 1.5748151 1.7384998
## 85% 95%
## 1.9930176 2.5840549
Code flip rt for PPC plots
# Signed ("flipped") RTs for PPC plots: rejections (response == 0) go on the
# negative axis, endorsements stay positive.
s_bdf$flip_rt <- s_bdf$rt
s_bdf[s_bdf$response==0, "flip_rt"] <- -s_bdf[s_bdf$response==0, "flip_rt"]
m1_ddm_ppcs$flip_rt <- m1_ddm_ppcs$rt
m1_ddm_ppcs[m1_ddm_ppcs$response==0, "flip_rt"] <- -m1_ddm_ppcs[m1_ddm_ppcs$response==0, "flip_rt"]
# FIX: the original repeated the two m1_ddm_ppcs lines verbatim; the duplicate
# re-derived flip_rt from rt and re-negated it (a no-op), so it is removed.
Cap the PPCs at 3 because that’s where the task cut off
# Cap the PPC samples at the 3 s task deadline. subset() drops rows whose
# condition is NA, matching dplyr::filter().
m1_ddm_ppcs <- subset(m1_ddm_ppcs, rt < 3)
Create alt negative coded rt with negative side flipped so lines up with QP plots
# alt_rt: flip_rt with the sign additionally inverted on negative-valence
# trials, so the density panels line up with the QP plots.
m1_ddm_ppcs$alt_rt <- m1_ddm_ppcs$flip_rt
is_neg_ppc <- m1_ddm_ppcs$valence == "negative"
m1_ddm_ppcs$alt_rt[is_neg_ppc] <- -m1_ddm_ppcs$alt_rt[is_neg_ppc]
s_bdf$alt_rt <- s_bdf$flip_rt
is_neg_emp <- s_bdf$valence == "negative"
s_bdf$alt_rt[is_neg_emp] <- -s_bdf$alt_rt[is_neg_emp]
# PPC density plot: empirical signed RTs (black, solid) overlaid with
# DDM-simulated signed RTs (orange, long-dash), faceted by valence.
# ga/ap/ft/tp are shared ggplot theme/setting objects defined earlier.
ddm_alt <- ggplot(s_bdf, aes(x=alt_rt)) +
geom_density(alpha=.1, size=1.3, color="black", fill="black") +
geom_density(data=m1_ddm_ppcs, alpha=0, color="darkorange",
size=1.3, fill="red", linetype="longdash") +
facet_wrap( ~ valence) +
ga + ap + ft + tp + xlab("reaction time") + xlim(-3.1, 3.1) +
theme(axis.text=element_blank(), axis.ticks = element_blank()) +
ylab("")
ddm_alt
#ggsave("../paper/figs/big_ppc_alt.png", ddm_alt, width=9.25, height=3)
X axis - response frequency
# Response frequencies (p(endorse) = proportion of response == 1) for each
# model x valence cell, used as the x axis of the QP plot.
# FIX: the original computed table(...)[2]/sum(table(...)), which silently
# returns NA if a cell happens to contain only one response type;
# mean(response == 1, na.rm = TRUE) is equivalent (NAs excluded) and robust.
ddm_neg <- mean(m1_ddm_ppcs$response[m1_ddm_ppcs$valence == "negative"] == 1, na.rm = TRUE)
ddm_pos <- mean(m1_ddm_ppcs$response[m1_ddm_ppcs$valence == "positive"] == 1, na.rm = TRUE)
emp_neg <- mean(s_bdf$response[s_bdf$valence == "negative"] == 1, na.rm = TRUE)
emp_pos <- mean(s_bdf$response[s_bdf$valence == "positive"] == 1, na.rm = TRUE)
Y axis - reaction time quantiles
# RT quantile probabilities used for the quantile-probability plot
qs <- seq(0.1, .9, .2)
CalculateRTQuantile <- function(val_resp_df, qs) {
### Get subject average quantiles at a given valence-response level ###
# val_resp_df: trial rows already filtered to ONE valence x response cell.
# Splits by subject and returns a long table with one row per subject x
# quantile: ID, valence, response, the RT quantile value, and its prob.
# NOTE(review): data.table() recycles the scalar unique() values across the
# length(qs) quantile rows — this assumes each subject's rows really are a
# single valence/response level (guaranteed by the caller's filter()).
all_subj_quantiles <-
lapply(split(val_resp_df, val_resp_df$subj_idx), function(x) {
out <-
data.table(unique(x$subj_idx),
unique(x$valence),
unique(x$response),
quantile(x$rt, probs = qs),
qs) %>%
setNames(c("ID", "valence", "response", "quantiles", "qs"))
out
}) %>% bind_rows()
all_subj_quantiles
}
# Subject-level RT quantiles for every valence x response cell, tagged with
# the generating model ("ddm" = simulated PPC data, "empirical" = observed)
ddm <-
data.table(
rbind(
CalculateRTQuantile(m1_ddm_ppcs %>% filter(valence=="negative", response==1), qs),
CalculateRTQuantile(m1_ddm_ppcs %>% filter(valence=="positive", response==1), qs),
CalculateRTQuantile(m1_ddm_ppcs %>% filter(valence=="negative", response==0), qs),
CalculateRTQuantile(m1_ddm_ppcs %>% filter(valence=="positive", response==0), qs)),
"model"="ddm"
)
emp <- data.table(
rbind(
CalculateRTQuantile(s_bdf %>% filter(valence=="negative", response==1), qs),
CalculateRTQuantile(s_bdf %>% filter(valence=="positive", response==1), qs),
CalculateRTQuantile(s_bdf %>% filter(valence=="negative", response==0), qs),
CalculateRTQuantile(s_bdf %>% filter(valence=="positive", response==0), qs)),
"model"="empirical")
all_subj_qs <- rbind(ddm, emp)
Put it all together
# Average the subject-level quantiles within model x valence x response x qs
q_rt_sum <- all_subj_qs %>% group_by(model, valence, response, qs) %>% summarize(q=mean(quantiles))
## `summarise()` has grouped output by 'model', 'valence', 'response'. You can
## override using the `.groups` argument.
#q_rt_sum %>% filter(response==1 && valence=="negative", model=="ddm")
#ddm_neg
# Quantile-probability table: for each model x valence, pair the RT quantiles
# of each response with its response probability (p(endorse) for response==1,
# 1 - p(endorse) for response==0).
# FIX: the original used the scalar `&&` inside filter(); on vectors it tests
# only the first element (and errors in R >= 4.3). Conditions are now
# comma-separated, which filter() combines elementwise (same as `&`).
qp_df <-
  rbind(
    ## DDM
    q_rt_sum %>% filter(response==1, valence=="negative", model=="ddm") %>% mutate(rp=ddm_neg),
    q_rt_sum %>% filter(response==0, valence=="negative", model=="ddm") %>% mutate(rp=1-ddm_neg),
    q_rt_sum %>% filter(response==1, valence=="positive", model=="ddm") %>% mutate(rp=ddm_pos),
    q_rt_sum %>% filter(response==0, valence=="positive", model=="ddm") %>% mutate(rp=1-ddm_pos),
    ## Empirical
    q_rt_sum %>% filter(response==1, valence=="negative", model=="empirical") %>% mutate(rp=emp_neg),
    q_rt_sum %>% filter(response==0, valence=="negative", model=="empirical") %>% mutate(rp=1-emp_neg),
    q_rt_sum %>% filter(response==1, valence=="positive", model=="empirical") %>% mutate(rp=emp_pos),
    q_rt_sum %>% filter(response==0, valence=="positive", model=="empirical") %>% mutate(rp=1-emp_pos)
  )
# Level order controls legend/fill order: empirical first
qp_df$model <- factor(qp_df$model, levels=c("empirical", "ddm"))
# Quantile-probability plot: mean RT quantiles (y) against response
# frequency (x), empirical vs. DDM, faceted by valence.
qp_plot <- ggplot(qp_df, aes(x=rp, y=q, fill=model)) + geom_point(pch=21, size=6, alpha=.7) +
facet_wrap( ~ valence) + ga + ap + lp + ft +
ylab("reaction time") + xlab("response frequency") +
scale_fill_manual(values=c("black", "darkorange"), labels=c("Empirical", "DDM")) +
theme(axis.text.x = element_text(angle=30, hjust=1, size=20)) +
theme(legend.position = c(.25, .65)) #+ xlim(500, -1200)
qp_plot
#ggsave("../paper/figs/qp_plot.png", qp_plot, width=10, height=4.25)
# probably don't need this plot
# ppc_lf <- rbind(
# data.table(m1_ddm_ppcs %>% select("subj_idx", "response", "valence", "flip_rt"), "ddm"),
# data.table(lm1_ppc %>% select("subj_idx", "response"="responses", "valence", "flip_rt"), "levy"),
# data.table(s_bdf %>% select("subj_idx", "response", "valence", "flip_rt"), "empirical")
# ) %>% setNames(c("ID", "response", "valence", "flip_rt", "model"))
#
# table(ppc_lf$response)
#
# # Summarize neg yes
# ny <- ppc_lf %>% filter( valence=="negative") %>% group_by(ID, model) %>% summarize(mr=mean(response))
#
#
# sd(as.numeric(unlist(ny[ny$model=="ddm", "mr"])))/sqrt(length(unique(bx_df$subj_idx)))
# nys <- ny %>% group_by(model) %>% summarize(m=mean(mr))
#
# nys$se <-
# c(
# sd(as.numeric(unlist(ny[ny$model=="ddm", "mr"])))/sqrt(length(unique(bx_df$subj_idx))),
# sd(as.numeric(unlist(ny[ny$model=="levy", "mr"])))/sqrt(length(unique(bx_df$subj_idx))),
# sd(as.numeric(unlist(ny[ny$model=="empirical", "mr"])))/sqrt(length(unique(bx_df$subj_idx)))
# )
#
# # c(
# # sd(unlist(ppc_lf[ppc_lf$model=="ddm", "response"]))/sqrt(length(unique(bx_df$subj_idx))), # ** make sure right
# # sd(unlist(ppc_lf[ppc_lf$model=="levy", "response"]))/sqrt(length(unique(bx_df$subj_idx))),
# # sd(unlist(ppc_lf[ppc_lf$model=="empirical", "response"]))/sqrt(length(unique(bx_df$subj_idx))))
#
#
# pd <- .25
# ny$model <- factor(ny$model, levels=c("ddm", "empirical", "levy"))
# neg <- ggplot(ny, aes(x=model, y=mr, group=ID, fill=model)) +
# #geom_line(position=position_dodge(width=.1), alpha=.16) +
# geom_point(width=.1, position=position_dodge(width=.18), alpha=1, size = 2, pch=21) +
# scale_fill_manual(values=c("orange", "gray57", "purple")) +
# # geom_errorbar(data=nys, aes(x=model, ymax=m+se, ymin=m-se), inherit.aes = FALSE, size=2.2, width=.15, color="blue") +
# geom_point(data=nys, aes(x=model, y=m), inherit.aes = FALSE, size=8, pch=21, fill="white") +
#
# geom_hline(yintercept = c(unlist(nys[nys$model=="empirical", "m"])), size=1.2) +
# ylim(0, 1) + ga + ap + tol + xlab("") + ylab("response \n frequency") +
# theme(axis.text.x=element_blank(), axis.ticks.x=element_blank(), axis.title = element_text(size=55))
#
# neg
#
# nys
# Summarize neg yes
# ny <- ppc_lf %>% filter( valence=="positive") %>% group_by(ID, model) %>% summarize(mr=mean(response))
#
# nys <- ny %>% group_by(model) %>% summarize(m=mean(mr))
#
# nys$se <-
# c(
# sd(as.numeric(unlist(ny[ny$model=="ddm", "mr"])))/sqrt(length(unique(bx_df$subj_idx))),
# sd(as.numeric(unlist(ny[ny$model=="levy", "mr"])))/sqrt(length(unique(bx_df$subj_idx))),
# sd(as.numeric(unlist(ny[ny$model=="empirical", "mr"])))/sqrt(length(unique(bx_df$subj_idx)))
# )
# #
# # c(
# # sd(unlist(ppc_lf[ppc_lf$model=="ddm", "response"]))/sqrt(length(unique(bx_df$subj_idx))), # ** make sure right
# # sd(unlist(ppc_lf[ppc_lf$model=="levy", "response"]))/sqrt(length(unique(bx_df$subj_idx))),
# # sd(unlist(ppc_lf[ppc_lf$model=="empirical", "response"]))/sqrt(length(unique(bx_df$subj_idx))))
#
#
# pd <- .25
# ny$model <- factor(ny$model, levels=c("ddm", "empirical", "levy"))
# pos <- ggplot(ny, aes(x=model, y=mr, group=ID, fill=model)) +
# geom_point(width=.1, position=position_dodge(width=.18), alpha=1, size = 2, pch=21) +
# scale_fill_manual(values=c("orange", "gray57", "purple")) +
# # geom_errorbar(data=nys, aes(x=model, ymax=m+se, ymin=m-se), inherit.aes = FALSE, size=2.2, width=.15, color="blue") +
# geom_point(data=nys, aes(x=model, y=m), inherit.aes = FALSE, size=8, pch=21, fill="white") +
#
# geom_hline(yintercept = c(unlist(nys[nys$model=="empirical", "m"])), size=1.2) +
# ylim(0, 1) + ga + ap + tol + xlab("") + ylab("") +
# theme(axis.text=element_blank(), axis.ticks=element_blank(), axis.title = element_text(size=40)) #+
# #scale_fill_manual(values=c("black", "orange", "purple"), labels=c("Empirical", "DDM", "Lévy"))
# #+ xlim(500, -1200)
#
# #pos
# both <- neg+pos
# both
#ggsave("../paper/figs/ppc_freqs.png", both, width=18, height=6)
# IDs of participants with usable task data
completer_ids <- unique(bx_df$subj_idx)
# Item-level DASS + FFMQ responses; missing entries are coded as 999.
# (Original repo path: ./../../data/raw_files/justDASSandFFMQ_with_imputes_replaced_with_999.csv)
item_qairre_df <- read.csv("./Public_Data/justDASSandFFMQ_with_imputes_replaced_with_999.csv")
# ID.ffmq / ID.DASS come from different sheets; previously spot-checked to
# line up: all(item_qairre_df$ID.ffmq == item_qairre_df$ID.DASS)
# DASS depression-subscale item numbers (this subscale has no reverse scoring)
dass_dep_qs <- c(3, 5, 10, 13, 16, 17, 21, 24, 26, 31, 34, 37, 38, 42)
# Keep only task completers
iq_c <- subset(item_qairre_df, ID.ffmq %in% completer_ids)
DASS
# Keep only the DASS columns (logical mask instead of integer positions)
iq_c_d <- iq_c[grepl("DASS", names(iq_c))]
Time 1 - just depression subscale
# Time 1
# Pull the 14 time-1 depression items (columns DASS1.<n>)
dd1 <- iq_c_d[paste0("DASS1.", dass_dep_qs)]
# 1.26% of baseline data missing
sum(length(which(dd1==999)), length(which(is.na(dd1))))/(dim(dd1)[1]*dim(dd1)[2])*100
## [1] 1.264881
dd1 <- data.frame(dd1)
# Recode the 999 missing-data sentinel to NA
dd1[dd1==999] <- NA
which(is.na(rowSums(dd1))) # 4 pts have NAs
## [1] 2 42 79 83
# 1 all data missing, the other just a couple for each
dd1[which(is.na(rowSums(dd1))), ]
## DASS1.3 DASS1.5 DASS1.10 DASS1.13 DASS1.16 DASS1.17 DASS1.21 DASS1.24
## 2 2 1 3 2 1 2 2 2
## 42 NA NA NA NA NA NA NA NA
## 79 1 1 1 NA 1 0 0 0
## 83 2 3 3 2 2 1 1 2
## DASS1.26 DASS1.31 DASS1.34 DASS1.37 DASS1.38 DASS1.42
## 2 2 1 1 1 1 NA
## 42 NA NA NA NA NA NA
## 79 1 0 0 0 0 0
## 83 2 2 NA 1 1 3
# Put in ID
dd1 <- data.frame("ID"=iq_c_d$ID.DASS, dd1)
# For robustness check, casewise delete pts with any data missing.
# FIX: complete.cases() replaces -which(is.na(rowSums(...))), which drops
# EVERY row when there happen to be no NAs (-integer(0) selects zero rows);
# with NAs present the two are equivalent.
dd1_casewise_delete <- dd1[complete.cases(dd1), ]
dd1_casewise_delete$dass_sum_score <- rowSums(dd1_casewise_delete[2:15])
#dd1[which((is.na(rowSums(dd1[2:15])))), ]
id_to_del <- 50 # This is the pt with all missing—since have no data, casewise delete this person
dd1 %>% filter(ID == id_to_del)
## ID DASS1.3 DASS1.5 DASS1.10 DASS1.13 DASS1.16 DASS1.17 DASS1.21 DASS1.24
## 1 50 NA NA NA NA NA NA NA NA
## DASS1.26 DASS1.31 DASS1.34 DASS1.37 DASS1.38 DASS1.42
## 1 NA NA NA NA NA NA
# FIX: use the named constant instead of a second hard-coded 50
dd1 <- dd1[dd1$ID != id_to_del, ]
# .21% of items missing after excluding all missing
length(which(is.na(dd1)))/(dim(dd1)[1]*dim(dd1)[2])*100
## [1] 0.2105263
# Replace any NAs with the mean of the remaining items (per-participant
# mean imputation over the 14 depression items; ID in column 1 is excluded)
for (r in 1:nrow(dd1)) {
if (any(is.na(dd1[r, 2:15]))) {
this_row <- as.numeric(unlist(dd1[r, 2:15])) # Exclude the ID
# Mean to replace NAs with
mean_non_na <- mean(this_row[which(!is.na(this_row))])
this_row[is.na(as.numeric(unlist(this_row)))] <- mean_non_na
# Put in df
dd1[r, 2:15] <- this_row
}
}
# Depression subscale sum score (items only, ID excluded)
dd1$dass_sum_score <- rowSums(dd1[2:15])
#gdf <- haven::read_sav("./../data/raw_files/Britton_ConcurrentStudyA_7.11.16.sav")
#gdf$DASS_Depression_SUM_Q1==rowSums(dd1)
mean(dd1$dass_sum_score) # Depression only
## [1] 9.578138
sd(dd1$dass_sum_score)
## [1] 7.694246
Time 2
# Time 2
# 2.31 % of final data missing
dd2 <- iq_c_d[paste0("DASS2.", dass_dep_qs)]
sum(length(which(dd2==999)), length(which(is.na(dd2))))/ (dim(dd2)[1]*dim(dd2)[2])*100
## [1] 2.306548
# Recode the 999 missing-data sentinel to NA
dd2[dd2==999] <- NA
which(is.na(rowSums(dd2))) # 3pts have NAs
## [1] 48 50 51
# 1 all data missing, the other just a couple for each
dd2[which(is.na(rowSums(dd2))), ]
## DASS2.3 DASS2.5 DASS2.10 DASS2.13 DASS2.16 DASS2.17 DASS2.21 DASS2.24
## 48 NA NA NA NA NA NA NA NA
## 50 NA NA NA NA NA NA NA NA
## 51 0 NA 1 NA 1 0 1 1
## DASS2.26 DASS2.31 DASS2.34 DASS2.37 DASS2.38 DASS2.42
## 48 NA NA NA NA NA NA
## 50 NA NA NA NA NA NA
## 51 1 1 NA 1 0 0
# Put in ID
dd2 <- data.frame("ID"=iq_c_d$ID.DASS, dd2)
# For robustness check, casewise delete pts with any data missing.
# FIX: complete.cases() matches the original -which(is.na(rowSums(...)))
# when NAs exist, but does not drop every row when none do.
dd2_casewise_delete <- dd2[complete.cases(dd2), ]
dd2_casewise_delete$dass_sum_score <- rowSums(dd2_casewise_delete[2:15])
# Delete pts with all data missing
# NOTE(review): IDs 56 and 58 are hard-coded as the all-missing participants
# (rows 48 and 50 in the printout above) — re-verify if the input file changes.
ids_to_del <- c(56, 58)
dd2 <- dd2 %>% filter(!ID %in% ids_to_del)
# .21% of items missing
length(which(is.na(dd2)))/(dim(dd2)[1]*dim(dd2)[2])*100
## [1] 0.212766
# Replace the one person missing just a couple items with the mean
# NOTE(review): ID 59 is hard-coded as the only partially-missing participant
mean_to_repl <- mean(unlist(dd2[dd2$ID==59, 2:15][which(!is.na(dd2[dd2$ID==59, 2:15]))]))
dd2[dd2$ID==59, which(is.na(dd2[dd2$ID==59, ]))] <- mean_to_repl
# Depression subscale sum score (items only, ID excluded)
dd2$dass_sum_score <- rowSums(dd2[2:15])
dd2$dass_sum_score
## [1] 5.000000 17.000000 1.000000 8.000000 1.000000 2.000000 3.000000
## [8] 0.000000 4.000000 13.000000 3.000000 0.000000 24.000000 1.000000
## [15] 2.000000 1.000000 10.000000 0.000000 0.000000 6.000000 0.000000
## [22] 0.000000 3.000000 1.000000 0.000000 0.000000 0.000000 11.000000
## [29] 0.000000 2.000000 8.000000 0.000000 5.000000 8.000000 2.000000
## [36] 1.000000 18.000000 16.000000 2.000000 14.000000 2.000000 2.000000
## [43] 4.000000 11.000000 1.000000 1.000000 5.000000 0.000000 8.909091
## [50] 6.000000 0.000000 1.000000 3.000000 0.000000 4.000000 8.000000
## [57] 2.000000 1.000000 2.000000 0.000000 19.000000 0.000000 6.000000
## [64] 15.000000 1.000000 22.000000 4.000000 5.000000 5.000000 0.000000
## [71] 0.000000 1.000000 2.000000 4.000000 1.000000 4.000000 0.000000
## [78] 8.000000 2.000000 0.000000 7.000000 3.000000 9.000000 14.000000
## [85] 1.000000 8.000000 6.000000 4.000000 1.000000 0.000000 0.000000
## [92] 2.000000 3.000000 2.000000
#any(is.na(dd2))
# Imputed, completer-only item data for each time point
dass_time_1 <- dd1
dass_time_2 <- dd2
Make sure the DASS are numerically ordered, then give them names
# Parse the item number out of each "DASS1.<n>" column name so we can verify
# the columns are in ascending item order before attaching readable names.
# NOTE(review): `order` shadows base::order(); kept because it is referenced
# again further down the script.
order <- as.numeric(unlist(
lapply(names(dass_time_1)[grep("DASS", names(dass_time_1))], function(x) unlist(map(strsplit(x, "[.]"), 2)))
))
# Strictly increasing check (replaces the original foreach pairwise loop)
if (all(diff(order) > 0)) {
extra_d1 <- dass_time_1[names(dass_time_1)[grep("DASS", names(dass_time_1))]] %>%
setNames(c(
# 3, 5, 10, 13
"no-positive-feelings", "couldnt-get-going", "easily-upset", "sad-and-depressed",
# 16, 17, 21
"lost-interest", "not-worth-much-as-person", "life-not-worthwhile",
# 24, 26, 31, 34, 37,
"no-enjoyment", "down-hearted-and-blue", "no-enthusiasm", "worthless", "no-hope",
#38, 42
"life-meaningless", "no-initiative"
))
# data.frame() converts the dashes to dots (e.g. no.positive.feelings)
dass_time_1 <- data.frame(extra_d1, dass_time_1)
}
# Spot checks
#dass_time_1$no.initiative==dd1$DASS1.42
# dass_time_1$no.enjoyment==dd1$DASS1.24
# dass_time_1$life.meaningless==dd1$DASS1.38
# dass_time_1$sad.and.depressed==dd1$DASS1.13
# Same check and renaming for the time-2 items.
order2 <- as.numeric(unlist(
lapply(names(dass_time_2)[grep("DASS", names(dass_time_2))], function(x) unlist(map(strsplit(x, "[.]"), 2)))
))
# BUG FIX: the original loop compared order[i] < order2[i+1], i.e. it checked
# the time-1 item numbers against the time-2 ones instead of verifying that
# order2 itself is strictly increasing.
if (all(diff(order2) > 0)) {
extra_d2 <- dass_time_2[names(dass_time_2)[grep("DASS", names(dass_time_2))]] %>%
setNames(c(
# 3, 5, 10, 13
"no-positive-feelings", "couldnt-get-going", "easily-upset", "sad-and-depressed",
# 16, 17, 21
"lost-interest", "not-worth-much-as-person", "life-not-worthwhile",
# 24, 26, 31, 34, 37,
"no-enjoyment", "down-hearted-and-blue", "no-enthusiasm", "worthless", "no-hope",
#38, 42
"life-meaningless", "no-initiative"
))
dass_time_2 <- data.frame(extra_d2, dass_time_2)
}
# Spot checks
#dass_time_2$no.initiative==dd2$DASS2.42
#dass_time_2$down.hearted.and.blue==dd2$DASS2.26
#dass_time_2$no.positive.feelings==dd2$DASS2.3
#dass_time_2$sad.and.depressed==dd2$DASS2.13
Create a long-form version
# Keep only participants present at both time points (paired data)
d1_short <- dass_time_1 %>% filter(ID %in% dass_time_2$ID)
# Long format: one row per participant x item (readable names -> "item",
# ratings -> "scores")
d1_lf <- d1_short[1:(length(dass_dep_qs)+1)] %>%
pivot_longer(cols = no.positive.feelings:no.initiative,
names_to = "item",
values_to = "scores"
)
d2_short <- dass_time_2 %>% filter(ID %in% dass_time_1$ID)
if (all(d1_short$ID==d2_short$ID)) {
# Post - pre change per item, duplicated once with a "change_" prefix
change_extra <- d2_short[1:length(dass_dep_qs)] - d1_short[1:length(dass_dep_qs)]
change_df <- data.frame(
change_extra, change_extra %>% setNames(paste0("change_", names(change_extra))))
}
# Changes the order so not working yet
# NOTE(review): `labs` shadows ggplot2::labs() for non-call uses and appears
# unused downstream — candidate for removal.
labs <- unlist(lapply(strsplit(unique(d1_lf$item), "[.]"), function(x) {
out <- paste0(x, collapse=" ")
out
}))
Drop the few imputed ones for vis
# Drop the handful of mean-imputed (non-integer) scores for visualization.
# FIX: guard the negative index — with no non-integer scores, the original
# d1_lf[-which(...), ] would have dropped every row (-integer(0) selects none).
imputed_rows <- which(d1_lf$scores %% 1 != 0)
if (length(imputed_rows) > 0) d1_lf <- d1_lf[-imputed_rows, ]
Baseline
# Per-item means and per-item x participant means at baseline
d1_summs <- d1_lf %>% group_by(item) %>% summarize(m=mean(scores))
d1_summs_ID <- d1_lf %>% group_by(item, ID) %>% summarize(m=mean(scores))
## `summarise()` has grouped output by 'item'. You can override using the
## `.groups` argument.
# 95% CIs.
# NOTE(review): uses the task-completer count as n for every item — confirm
# this matches the per-item number of retained (non-imputed) scores.
d1_sds <- d1_lf %>% group_by(item) %>%
summarize(ci95=qnorm(.975)*sd(scores)/sqrt(length(unique(bx_df$subj_idx))))
if (all(d1_summs$item==d1_sds$item)) d1_summs$ci95 <- d1_sds$ci95
# Order items by mean rating for plotting
d1_summs_arr <-
d1_summs %>% arrange(m)
#d1_summs_arr$item_f <- factor(ch_summs_arr$item, levels=rev(ch_summs_arr$item))
d1_lf$item_f <- factor(d1_lf$item, levels=c(d1_summs_arr$item))
base_labs <- as.character(levels(d1_lf$item_f))
#unlist(map(strsplit(levels(d1_lf$item_f), "change_"), 2))
# Human-readable axis labels (dots -> spaces)
b_labs <- unlist(lapply(strsplit(base_labs, "[.]"), function(x) {
out <- paste0(x, collapse=" ")
out
}))
# Baseline item plot: jittered individual ratings per item (rows ordered by
# item mean) with mean +/- 95% CI overlaid. ga/ap/tp are shared theme objects.
a_d <- ggplot(d1_lf, aes(x=scores, y=item_f)) +
geom_vline(xintercept=seq(0, 3, 1), size=1.5, color="gray57") +
geom_jitter(alpha=.3, size=2.5, width=.1, height=0.28, pch=21, color="darkred", fill="white") +
#geom_hline(yintercept=seq(1, 15, 1)) +
geom_errorbar(data=d1_summs, aes(x=m, y=item,
xmin=m-ci95, xmax=m+ci95), color="darkred",
inherit.aes=FALSE, size=2.2, width=.4) +
geom_point(data=d1_summs, aes(x=m, y=item), pch=21,
fill="white", color="black", size=6, alpha=.9) +
ga + ap + theme(axis.text.y =element_text(size=11)) +
theme(axis.text.x =element_text(size=15)) +
theme(axis.title.x =element_text(size=18)) +
ylab("") +
labs(title="Depression symptoms \nat baseline") + tp +
xlab("ratings") +
scale_y_discrete(labels=b_labs)
#d1_summs # Spot check that life meaningless and life worthless are low means , easily upset in middle, couldnt get going at top
a_d
Change
# Post - pre change scores with IDs, reshaped to long format for plotting
change_for_p <- data.frame("ID"=d2_short$ID, change_df[grep("change", names(change_df))])
ch_lf <- change_for_p %>%
pivot_longer(cols = change_no.positive.feelings:change_no.initiative,
names_to = "item",
values_to = "scores"
)
# Drop change scores involving an imputed (non-integer) value, as above.
# FIX: guard the negative index so an empty which() cannot drop every row.
imputed_ch_rows <- which(ch_lf$scores %% 1 != 0)
if (length(imputed_ch_rows) > 0) ch_lf <- ch_lf[-imputed_ch_rows, ]
#which(ch_lf$scores %% 1 !=0)
# Per-item mean change and 95% CI (n = task-completer count, as for baseline)
ch_summs <- ch_lf %>% group_by(item) %>% summarize(m=mean(scores))
ch_sds <- ch_lf %>% group_by(item) %>%
summarize(ci95=qnorm(.975)*sd(scores)/sqrt(length(unique(bx_df$subj_idx))))
if (all(ch_summs$item==ch_sds$item)) ch_summs$ci95 <- ch_sds$ci95
# Order items by mean change; levels reversed for the plot orientation
ch_summs_arr <-
ch_summs %>% arrange(m)
ch_summs_arr$item_f <- factor(ch_summs_arr$item, levels=rev(ch_summs_arr$item))
ch_lf$item_f <- factor(ch_lf$item, levels=rev(c(ch_summs_arr$item)))
# Labels: strip the "change_" prefix and turn dots into spaces
relab <- unlist(map(strsplit(levels(ch_lf$item_f), "change_"), 2))
sx_change_labs <- unlist(lapply(strsplit(unique(relab), "[.]"), function(x) {
out <- paste0(x, collapse=" ")
out
}))
sx_change_labs
## [1] "life meaningless" "life not worthwhile"
## [3] "no hope" "down hearted and blue"
## [5] "no positive feelings" "lost interest"
## [7] "worthless" "not worth much as person"
## [9] "easily upset" "no initiative"
## [11] "no enthusiasm" "no enjoyment"
## [13] "sad and depressed" "couldnt get going"
# Change plot: jittered per-participant post - pre change per item with
# mean +/- 95% CI; items ordered by mean change.
b_d <- ggplot(ch_lf, aes(x=scores, y=item_f)) +
geom_vline(xintercept=seq(-3, 3, 1), size=1.5, color="gray57") +
geom_jitter(alpha=.3, size=2.5, width=.1, height=0.28, pch=21, color="red", fill="white") +
#geom_hline(yintercept=seq(1, 15, 1)) +
geom_errorbar(data=ch_summs_arr, aes(x=m, y=item,
xmin=m-ci95, xmax=m+ci95), color="red",
inherit.aes=FALSE, size=2.2, width=.4) +
geom_point(data=ch_summs_arr, aes(x=m, y=item), pch=21,
fill="white", color="black", size=6, alpha=.8) +
ga + ap + theme(axis.text.y =element_text(size=11)) +
theme(axis.title.x =element_text(size=18)) +
theme(axis.text.x =element_text(size=15)) +
ylab("") +
labs(title="Change in depression symptoms \n(post - pre)") + tp +
xlab(TeX("$\\Delta$ ratings")) +
#scale_x_discrete(labels=bn) +
scale_y_discrete(labels=sx_change_labs)
# Spot check
# names(d2_short)==names(d1_short)
# data.frame(colMeans(d2_short-d1_short)[1:14])
b_d
ch_lf
## # A tibble: 1,296 × 4
## ID item scores item_f
## <int> <chr> <dbl> <fct>
## 1 1 change_no.positive.feelings -1 change_no.positive.feelings
## 2 1 change_couldnt.get.going -1 change_couldnt.get.going
## 3 1 change_easily.upset -2 change_easily.upset
## 4 1 change_sad.and.depressed -1 change_sad.and.depressed
## 5 1 change_lost.interest -1 change_lost.interest
## 6 1 change_not.worth.much.as.person 1 change_not.worth.much.as.person
## 7 1 change_life.not.worthwhile 0 change_life.not.worthwhile
## 8 1 change_no.enjoyment 0 change_no.enjoyment
## 9 1 change_down.hearted.and.blue 0 change_down.hearted.and.blue
## 10 1 change_no.enthusiasm -1 change_no.enthusiasm
## # … with 1,286 more rows
unique_items <- unique(d1_lf$item)
# Drift-rate (valence effect) rows for the participants present in d1_lf
d1m_s <- d1m %>% filter(ID %in% d1_lf$ID)
# Correlate each baseline depression item with the positive > negative drift
# rate estimate, collecting r, its 95% CI, and the p-value per item.
all_res <- foreach (i = 1:length(unique_items)) %do% {
  this_item <- d1_lf %>% filter(item==unique_items[i])
  d1m_s_tmp <- d1m_s %>% filter(ID %in% unique(this_item$ID))
  if (all(this_item$ID==d1m_s_tmp$ID)) {
    res <- cor.test(this_item$scores, d1m_s_tmp$v_val_ctr_)
    out <- data.table("item"=unique(this_item$item),
                      "r"=as.numeric(res$estimate),
                      "ci95_low"=as.numeric(res$conf.int[1]),
                      "ci95_high"=as.numeric(res$conf.int[2]),
                      "p"=as.numeric(res$p.value))
  } else {
    # BUG FIX: the original else branch was empty, so `out` silently carried
    # over the previous iteration's result (or errored on the first item).
    # NULL entries are dropped by bind_rows().
    out <- NULL
  }
  out
} %>% bind_rows()
# Bonferroni-corrected threshold across the 14 items
bonferroni <- .05/length(unique(d1_lf$item))
all_res$bonferroni <- (all_res$p < bonferroni)*1
# Star codes for the plot; note the non-standard mapping
# (*** = p < .005, **** = p < .001, ***** = p < .0005)
all_res[which(all_res$p < .05), "signif"] <- "*"
all_res[which(all_res$p < .01), "signif"] <- "**"
all_res[which(all_res$p < .005), "signif"] <- "***"
all_res[which(all_res$p < .001), "signif"] <- "****"
all_res[which(all_res$p < .0005), "signif"] <- "*****"
all_res[which(is.na(all_res$signif)), "signif"] <- " "
# Order items by correlation size for the forest plot
all_res_arr <- all_res %>% arrange(r)
all_res_arr$item <- factor(all_res_arr$item, levels=rev(c(all_res_arr$item)))
#all_res_arr %>% select(item, bonferroni, signif)
#ch_lf$item_f <- factor(ch_lf$item, levels=rev(c(ch_summs_arr$item)))
#relab <- unlist(map(strsplit(levels(ch_lf$item_f), "change_"), 2))
# Readable labels (dots -> spaces)
r_b_labs <- as.character(levels(all_res_arr$item))
rb_labs <- unlist(lapply(strsplit(r_b_labs, "[.]"), function(x) {
out <- paste0(x, collapse=" ")
out
}))
# Forest plot of per-item baseline correlations with the drift-rate valence
# effect (r with 95% CI).
# NOTE(review): the significance labels below are hand-placed geom_text calls
# at fixed y positions; "X" presumably marks Bonferroni survivors — verify
# against all_res_arr$bonferroni/signif, and re-check whenever the data or
# item ordering changes.
a_r <- ggplot(all_res_arr, aes(x=r, y=item)) +
geom_vline(xintercept = seq(-1, .2, .1), color="gray57", linetype="dotted") +
geom_vline(xintercept = c(-1, 0), color="black", size=1.4) +
geom_errorbar(data=all_res_arr, size=1.4, width=.19, color="gray42",
aes(x=r, y=item, xmin=ci95_low, xmax=ci95_high)) +
#geom_hline(yintercept=seq(1, 15, 1)) +
geom_point(size=6, fill="gray60", pch=21, alpha=.9) +
xlim(-1.15, .22) +
geom_text(x=-1.1, y=14, size=6, label="X*****") +
geom_text(x=-1.1, y=13, size=6, label="X*****") +
geom_text(x=-1.1, y=12, size=6, label="X ***") +
geom_text(x=-1.1, y=11, size=6, label="X ***") +
geom_text(x=-1.1, y=10, size=6, label="X ***") +
geom_text(x=-1.1, y=9, size=6, label="X ***") +
geom_text(x=-1.1, y=8, size=6, label=" **") +
geom_text(x=-1.1, y=7, size=6, label=" **") +
geom_text(x=-1.1, y=6, size=6, label=" *") +
geom_text(x=-1.1, y=5, size=6, label=" *") +
geom_text(x=-1.1, y=4, size=6, label=" *") +
geom_text(x=-1.1, y=3, size=6, label=" *") +
ga + theme(axis.text.y =element_text(size=11), axis.text.x=element_text(size=15),
axis.title = element_text(size=20)) +
ylab("") +
labs(title="Correlations of baseline depression symptoms with \n positive > negative drift rate estimates") +
theme(plot.title = element_text(size = 15, face='bold', hjust = .5)) +
xlab("Pearson's r (with 95% CIs)") +
scale_y_discrete(labels=rb_labs)
a_r
unique_ch_items <- unique(ch_lf$item)
# Drift rate * time interaction rows for the participants with change scores
d1m_s_ch <- d1m %>% filter(ID %in% ch_lf$ID)
#d1m_s_ch
# Correlate each item's symptom change with the positive > negative drift
# rate * time estimate.
all_res_ch <- foreach (i = 1:length(unique_ch_items)) %do% {
  this_ch_item <- ch_lf %>% filter(item==unique_ch_items[i])
  d1m_s_ch_tmp <- d1m_s_ch %>% filter(ID %in% unique(this_ch_item$ID))
  if (all(this_ch_item$ID==d1m_s_ch_tmp$ID)) {
    res <- cor.test(this_ch_item$scores, d1m_s_ch_tmp$v_val_ctr.sess_ctr_)
    out <- data.table("item"=unique(this_ch_item$item),
                      "r"=as.numeric(res$estimate),
                      "ci95_low"=as.numeric(res$conf.int[1]),
                      "ci95_high"=as.numeric(res$conf.int[2]),
                      "p"=as.numeric(res$p.value))
  } else {
    # BUG FIX: the empty else used to leak the previous iteration's `out`
    # (or error on item 1); NULL rows are dropped by bind_rows().
    out <- NULL
  }
  out
} %>% bind_rows()
# Bonferroni threshold across the 14 change items
bonferroni_ch <- .05/length(unique(ch_lf$item))
all_res_ch$bonferroni_ch <- (all_res_ch$p < bonferroni_ch)*1
# Star codes (non-standard mapping: *** = p < .005, **** = p < .001)
all_res_ch[which(all_res_ch$p < .05), "signif"] <- "*"
all_res_ch[which(all_res_ch$p < .01), "signif"] <- "**"
all_res_ch[which(all_res_ch$p < .005), "signif"] <- "***"
all_res_ch[which(all_res_ch$p < .001), "signif"] <- "****"
all_res_ch[which(is.na(all_res_ch$signif)), "signif"] <- " "
#all_res_ch
# Order items by r for the forest plot
arc_arr <- all_res_ch %>% arrange(r)
#arc_arr
arc_arr$item_f <- factor(arc_arr$item, levels=rev(as.character(arc_arr$item)))
#arc_arr$item_f <- factor(arc_arr$item, levels=as.character(arc_arr$item))
#arc_arr$item_f
# Labels: dots -> spaces, then strip the "change_" prefix
r_ch_labs <- as.character(levels(arc_arr$item_f))
rch_labs <- unlist(lapply(strsplit(r_ch_labs, "[.]"), function(x) {
out <- paste0(x, collapse=" ")
out <- unlist(map(strsplit(out, "change_"), 2))
out
}))
# all_res_ch # Spot checked
# Forest plot of per-item change-score correlations with the drift * time
# estimates.
# NOTE(review): the significance labels are hand-placed geom_text calls at
# fixed y positions — re-verify against arc_arr$signif if data change.
b_r <- ggplot(arc_arr, aes(x=r, y=item_f)) +
geom_text(x=-1.1, y=14, size=6, label="X ***") +
geom_text(x=-1.1, y=13, size=6, label="X ***") +
geom_text(x=-1.1, y=12, size=6, label=" ***") +
geom_text(x=-1.1, y=11, size=6, label=" **") +
# geom_text(x=-.35, y=10, size=6, label=" **") +
# geom_text(x=-.35, y=9, size=6, label=" *") +
geom_vline(xintercept = seq(-1, .2, .1), color="gray57", linetype="dotted") +
geom_vline(xintercept = c(-1, 0), color="black", size=1.4) +
xlim(-1.15, .22) +
geom_errorbar(data=arc_arr, size=1.4, width=.19, color="gray75",
aes(x=r, y=item_f, xmin=ci95_low, xmax=ci95_high)) +
geom_point(size=6, fill="gray85", pch=21, alpha=.9) +
ga +
theme(axis.text.y =element_text(size=11), axis.text.x=element_text(size=15),
axis.title = element_text(size=20)) +
ylab("") +
labs(title="Correlations of change in depression symptoms with \n positive > negative drift rate * time estimates") +
theme(plot.title = element_text(size = 15, face='bold', hjust = .5)) +
xlab("Pearson's r (with 95% CIs)") +
scale_y_discrete(labels=rch_labs)
b_r
# Assemble the four-panel symptom figure (patchwork): baseline ratings,
# change ratings, baseline correlations, change correlations
full_sxs_p <- a_d / b_d/
a_r / b_r
#full_sxs_p
#ggsave("../paper/figs/full_sxs.png", full_sxs_p, dpi=700, width=8, height=16)
Get Cronbach’s alpha
# Internal consistency (Cronbach's alpha) of the 14 baseline depression items
psych::alpha(dd1[2:15])
##
## Reliability analysis
## Call: psych::alpha(x = dd1[2:15])
##
## raw_alpha std.alpha G6(smc) average_r S/N ase mean sd median_r
## 0.93 0.93 0.95 0.5 14 0.011 0.68 0.55 0.47
##
## lower alpha upper 95% confidence boundaries
## 0.91 0.93 0.95
##
## Reliability if an item is dropped:
## raw_alpha std.alpha G6(smc) average_r S/N alpha se var.r med.r
## DASS1.3 0.93 0.93 0.95 0.50 13 0.011 0.012 0.48
## DASS1.5 0.93 0.93 0.95 0.51 13 0.011 0.012 0.49
## DASS1.10 0.92 0.92 0.95 0.49 12 0.012 0.012 0.46
## DASS1.13 0.93 0.93 0.95 0.51 13 0.011 0.011 0.49
## DASS1.16 0.92 0.93 0.95 0.49 13 0.012 0.012 0.47
## DASS1.17 0.93 0.93 0.94 0.50 13 0.011 0.011 0.48
## DASS1.21 0.93 0.93 0.94 0.50 13 0.011 0.010 0.47
## DASS1.24 0.92 0.92 0.94 0.49 12 0.012 0.012 0.47
## DASS1.26 0.92 0.93 0.94 0.49 13 0.012 0.012 0.47
## DASS1.31 0.92 0.93 0.95 0.50 13 0.011 0.011 0.47
## DASS1.34 0.92 0.93 0.94 0.49 13 0.011 0.011 0.47
## DASS1.37 0.92 0.93 0.95 0.49 12 0.012 0.011 0.47
## DASS1.38 0.92 0.93 0.94 0.49 12 0.012 0.011 0.47
## DASS1.42 0.93 0.93 0.95 0.51 13 0.011 0.011 0.49
##
## Item statistics
## n raw.r std.r r.cor r.drop mean sd
## DASS1.3 95 0.70 0.70 0.67 0.64 0.66 0.78
## DASS1.5 95 0.66 0.64 0.61 0.59 1.20 0.86
## DASS1.10 95 0.80 0.80 0.79 0.76 0.63 0.85
## DASS1.13 95 0.65 0.65 0.62 0.58 1.03 0.82
## DASS1.16 95 0.75 0.75 0.72 0.70 0.48 0.67
## DASS1.17 95 0.69 0.70 0.69 0.64 0.57 0.75
## DASS1.21 95 0.70 0.72 0.71 0.66 0.32 0.61
## DASS1.24 95 0.80 0.80 0.79 0.76 0.78 0.79
## DASS1.26 95 0.76 0.75 0.74 0.71 0.88 0.78
## DASS1.31 95 0.73 0.73 0.71 0.68 0.72 0.79
## DASS1.34 95 0.74 0.75 0.74 0.69 0.49 0.74
## DASS1.37 95 0.78 0.79 0.77 0.74 0.43 0.69
## DASS1.38 95 0.75 0.77 0.77 0.72 0.25 0.56
## DASS1.42 95 0.67 0.65 0.62 0.60 1.13 0.88
##
## Non missing response frequency for each item
## 0 0.384615384615385 1 1.61538461538462 1.92307692307692 2
## DASS1.3 0.48 0.00 0.41 0.00 0.00 0.06
## DASS1.5 0.19 0.00 0.52 0.00 0.00 0.20
## DASS1.10 0.57 0.00 0.27 0.00 0.00 0.12
## DASS1.13 0.25 0.01 0.52 0.00 0.00 0.16
## DASS1.16 0.60 0.00 0.33 0.00 0.00 0.06
## DASS1.17 0.57 0.00 0.32 0.00 0.00 0.09
## DASS1.21 0.75 0.00 0.20 0.00 0.00 0.04
## DASS1.24 0.42 0.00 0.40 0.00 0.00 0.16
## DASS1.26 0.33 0.00 0.51 0.00 0.00 0.13
## DASS1.31 0.46 0.00 0.39 0.00 0.00 0.12
## DASS1.34 0.62 0.00 0.29 0.00 0.01 0.04
## DASS1.37 0.66 0.00 0.26 0.00 0.00 0.05
## DASS1.38 0.80 0.00 0.16 0.00 0.00 0.03
## DASS1.42 0.24 0.00 0.46 0.01 0.00 0.20
## 3 miss
## DASS1.3 0.04 0
## DASS1.5 0.09 0
## DASS1.10 0.04 0
## DASS1.13 0.06 0
## DASS1.16 0.01 0
## DASS1.17 0.02 0
## DASS1.21 0.01 0
## DASS1.24 0.02 0
## DASS1.26 0.04 0
## DASS1.31 0.03 0
## DASS1.34 0.03 0
## DASS1.37 0.02 0
## DASS1.38 0.01 0
## DASS1.42 0.08 0
# Internal consistency (Cronbach's alpha) of the 14 time-2 depression items
psych::alpha(dd2[2:15])
##
## Reliability analysis
## Call: psych::alpha(x = dd2[2:15])
##
## raw_alpha std.alpha G6(smc) average_r S/N ase mean sd median_r
## 0.93 0.94 0.96 0.51 15 0.01 0.31 0.38 0.52
##
## lower alpha upper 95% confidence boundaries
## 0.91 0.93 0.95
##
## Reliability if an item is dropped:
## raw_alpha std.alpha G6(smc) average_r S/N alpha se var.r med.r
## DASS2.3 0.92 0.93 0.96 0.51 14 0.011 0.022 0.51
## DASS2.5 0.92 0.93 0.96 0.52 14 0.011 0.021 0.52
## DASS2.10 0.92 0.93 0.96 0.51 13 0.011 0.020 0.52
## DASS2.13 0.92 0.93 0.95 0.50 13 0.012 0.021 0.51
## DASS2.16 0.92 0.93 0.96 0.50 13 0.011 0.019 0.51
## DASS2.17 0.93 0.94 0.96 0.53 15 0.010 0.018 0.53
## DASS2.21 0.92 0.93 0.96 0.50 13 0.011 0.021 0.50
## DASS2.24 0.92 0.93 0.96 0.51 13 0.011 0.019 0.51
## DASS2.26 0.92 0.93 0.96 0.51 13 0.011 0.022 0.52
## DASS2.31 0.92 0.93 0.96 0.51 14 0.011 0.020 0.51
## DASS2.34 0.93 0.94 0.96 0.54 15 0.010 0.017 0.53
## DASS2.37 0.92 0.93 0.95 0.50 13 0.011 0.020 0.51
## DASS2.38 0.92 0.93 0.96 0.51 14 0.011 0.022 0.52
## DASS2.42 0.92 0.93 0.96 0.52 14 0.011 0.021 0.53
##
## Item statistics
## n raw.r std.r r.cor r.drop mean sd
## DASS2.3 94 0.76 0.74 0.72 0.71 0.35 0.56
## DASS2.5 94 0.75 0.70 0.68 0.67 0.70 0.76
## DASS2.10 94 0.74 0.76 0.75 0.70 0.20 0.50
## DASS2.13 94 0.82 0.80 0.79 0.78 0.50 0.63
## DASS2.16 94 0.81 0.84 0.84 0.78 0.18 0.41
## DASS2.17 94 0.54 0.58 0.55 0.49 0.16 0.40
## DASS2.21 94 0.79 0.82 0.81 0.77 0.14 0.35
## DASS2.24 94 0.76 0.77 0.76 0.71 0.29 0.58
## DASS2.26 94 0.79 0.76 0.75 0.74 0.59 0.68
## DASS2.31 94 0.75 0.76 0.74 0.70 0.22 0.47
## DASS2.34 94 0.52 0.55 0.51 0.46 0.13 0.40
## DASS2.37 94 0.79 0.81 0.81 0.76 0.16 0.40
## DASS2.38 94 0.70 0.74 0.72 0.66 0.11 0.34
## DASS2.42 94 0.77 0.72 0.70 0.70 0.67 0.77
##
## Non missing response frequency for each item
## 0 0.636363636363636 1 2 3 miss
## DASS2.3 0.69 0.00 0.27 0.04 0.00 0
## DASS2.5 0.44 0.01 0.46 0.05 0.04 0
## DASS2.10 0.83 0.00 0.15 0.01 0.01 0
## DASS2.13 0.57 0.01 0.34 0.07 0.00 0
## DASS2.16 0.83 0.00 0.16 0.01 0.00 0
## DASS2.17 0.85 0.00 0.14 0.01 0.00 0
## DASS2.21 0.86 0.00 0.14 0.00 0.00 0
## DASS2.24 0.77 0.00 0.19 0.03 0.01 0
## DASS2.26 0.52 0.00 0.37 0.11 0.00 0
## DASS2.31 0.80 0.00 0.18 0.02 0.00 0
## DASS2.34 0.88 0.01 0.09 0.02 0.00 0
## DASS2.37 0.85 0.00 0.14 0.01 0.00 0
## DASS2.38 0.90 0.00 0.09 0.01 0.00 0
## DASS2.42 0.48 0.00 0.40 0.09 0.03 0
FFMQ
# Keep only the FFMQ columns
iq_c_m <- iq_c[grep("FFMQ", names(iq_c))]
# Reverse code items
rev_items <- c(3, 5, 8, 10, 12, 13, 14, 16, 17, 18, 22, 23, 25, 28, 30, 34, 35, 38, 39)
# Check
#sort(c(3, 10, 14, 17, 25, 30, 35, 39, 5, 8, 13, 18, 23, 28, 34, 38, 12, 16, 22))==rev_items
# FFMQ facet item numbers (index positions within the 39 item columns)
aware <- c(5, 8, 13, 18, 23, 28, 34, 38)
non_judge <- c(3, 10, 14, 17, 25, 30, 35, 39)
non_react <- c(4, 9, 19, 21, 24, 29, 33)
observe <- c(1, 6, 11, 15, 20, 26, 31, 36)
Time 1
# Time 1 FFMQ items.
# fixed = TRUE so the "." in "FFMQ1." is matched literally rather than as a
# regex wildcard (the original unescaped pattern happened to work but is fragile).
m1 <- iq_c_m[grep("FFMQ1.", names(iq_c_m), fixed = TRUE)]
# 1.31% of baseline data missing (999 is the missing-data code)
(sum(m1 == 999, na.rm = TRUE) + sum(is.na(m1))) / prod(dim(m1)) * 100
## [1] 1.308761
# Recode the missing-data code to NA, then reverse-code the reverse-keyed items
# (1-5 scale, so reversed score = 6 - raw)
m1[m1 == 999] <- NA
m1[paste0("FFMQ1.", rev_items)] <- 6 - m1[paste0("FFMQ1.", rev_items)]
m1 <- data.frame("ID" = iq_c$ID.ffmq, m1)
#m1[which(is.na(rowSums(m1))), ]
# Delete participant 50, who is also missing FFMQ data
m1 <- m1[m1$ID != 50, ]
length(which(is.na(m1))) / (dim(m1)[1] * dim(m1)[2]) * 100
## [1] 0.2631579
# For robustness check, casewise delete participants with any missing data.
# complete.cases() is used instead of m1[-which(...), ]: the negative-index
# form silently drops EVERY row when which() returns integer(0) (i.e. no NAs).
m1_casewise_delete <- m1[complete.cases(m1), ]
# Replace any NAs with the mean of that participant's remaining items
for (r in seq_len(nrow(m1))) {
  if (any(is.na(m1[r, 2:40]))) {
    this_row <- as.numeric(unlist(m1[r, 2:40])) # Exclude the ID
    # Person-mean of the non-missing items fills the gaps
    this_row[is.na(this_row)] <- mean(this_row, na.rm = TRUE)
    # Put back in the data frame
    m1[r, 2:40] <- this_row
  }
}
#m1[which(is.na(rowSums(m1))), ]
#which(is.na(m1))
# Facet sum scores (facet vectors index positions within the 39 item columns)
m1$aware_sum <- rowSums(m1[2:40][aware])
m1$nonjudge_sum <- rowSums(m1[2:40][non_judge])
m1$nonreact_sum <- rowSums(m1[2:40][non_react])
m1$observe_sum <- rowSums(m1[2:40][observe])
m1_casewise_delete$aware_sum <- rowSums(m1_casewise_delete[2:40][aware])
m1_casewise_delete$nonjudge_sum <- rowSums(m1_casewise_delete[2:40][non_judge])
m1_casewise_delete$nonreact_sum <- rowSums(m1_casewise_delete[2:40][non_react])
m1_casewise_delete$observe_sum <- rowSums(m1_casewise_delete[2:40][observe])
# Inter-facet correlations at baseline
cor.test(m1$observe_sum, m1$aware_sum)
##
## Pearson's product-moment correlation
##
## data: m1$observe_sum and m1$aware_sum
## t = 0.60707, df = 93, p-value = 0.5453
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.1404957 0.2610637
## sample estimates:
## cor
## 0.06282629
# Observe vs. non-judge facet correlation at baseline
cor.test(m1$observe_sum, m1$nonjudge_sum)
##
## Pearson's product-moment correlation
##
## data: m1$observe_sum and m1$nonjudge_sum
## t = 0.35017, df = 93, p-value = 0.727
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.1664736 0.2361032
## sample estimates:
## cor
## 0.03628692
# Observe vs. non-react facet correlation at baseline
cor.test(m1$observe_sum, m1$nonreact_sum)
##
## Pearson's product-moment correlation
##
## data: m1$observe_sum and m1$nonreact_sum
## t = 2.4114, df = 93, p-value = 0.01786
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.04314921 0.42342407
## sample estimates:
## cor
## 0.2425826
# Aware vs. non-react facet correlation at baseline
cor.test(m1$aware_sum, m1$nonreact_sum)
##
## Pearson's product-moment correlation
##
## data: m1$aware_sum and m1$nonreact_sum
## t = 3.6369, df = 93, p-value = 0.0004525
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.1629167 0.5176046
## sample estimates:
## cor
## 0.3528732
# Facet means and SDs at baseline
mean(m1$aware_sum); sd(m1$aware_sum)
## [1] 24.04737
## [1] 5.984788
mean(m1$nonjudge_sum); sd(m1$nonjudge_sum)
## [1] 26.30471
## [1] 7.658276
mean(m1$nonreact_sum); sd(m1$nonreact_sum)
## [1] 18.83914
## [1] 3.837917
# Internal consistency (Cronbach's alpha) for the awareness facet at baseline
psych::alpha(m1[2:40][aware])
##
## Reliability analysis
## Call: psych::alpha(x = m1[2:40][aware])
##
## raw_alpha std.alpha G6(smc) average_r S/N ase mean sd median_r
## 0.88 0.88 0.9 0.48 7.5 0.018 3 0.75 0.52
##
## lower alpha upper 95% confidence boundaries
## 0.85 0.88 0.92
##
## Reliability if an item is dropped:
## raw_alpha std.alpha G6(smc) average_r S/N alpha se var.r med.r
## FFMQ1.5 0.86 0.86 0.87 0.47 6.3 0.022 0.020 0.52
## FFMQ1.8 0.86 0.86 0.88 0.46 6.0 0.023 0.028 0.52
## FFMQ1.13 0.86 0.86 0.87 0.47 6.3 0.022 0.021 0.52
## FFMQ1.18 0.88 0.88 0.90 0.51 7.3 0.019 0.027 0.54
## FFMQ1.23 0.87 0.87 0.89 0.50 6.9 0.020 0.030 0.52
## FFMQ1.28 0.87 0.87 0.89 0.48 6.5 0.021 0.031 0.53
## FFMQ1.34 0.88 0.88 0.90 0.52 7.6 0.018 0.020 0.53
## FFMQ1.38 0.86 0.85 0.87 0.46 5.9 0.023 0.029 0.46
##
## Item statistics
## n raw.r std.r r.cor r.drop mean sd
## FFMQ1.5 95 0.79 0.78 0.78 0.71 2.8 1.08
## FFMQ1.8 95 0.82 0.82 0.79 0.76 3.3 1.01
## FFMQ1.13 95 0.80 0.78 0.77 0.71 2.9 1.15
## FFMQ1.18 95 0.65 0.65 0.57 0.54 3.0 0.94
## FFMQ1.23 95 0.70 0.70 0.64 0.59 3.0 1.04
## FFMQ1.28 95 0.74 0.75 0.70 0.65 3.1 0.96
## FFMQ1.34 95 0.59 0.61 0.54 0.48 3.1 0.91
## FFMQ1.38 95 0.84 0.84 0.83 0.78 3.0 0.95
##
## Non missing response frequency for each item
## 1 2 2.71052631578947 2.78947368421053 3 4 5 miss
## FFMQ1.5 0.14 0.26 0.00 0.00 0.33 0.23 0.04 0
## FFMQ1.8 0.01 0.24 0.00 0.00 0.32 0.31 0.13 0
## FFMQ1.13 0.16 0.19 0.00 0.00 0.34 0.25 0.06 0
## FFMQ1.18 0.04 0.27 0.01 0.00 0.40 0.22 0.05 0
## FFMQ1.23 0.05 0.29 0.00 0.01 0.34 0.22 0.08 0
## FFMQ1.28 0.02 0.28 0.00 0.00 0.36 0.26 0.07 0
## FFMQ1.34 0.02 0.23 0.00 0.00 0.45 0.22 0.07 0
## FFMQ1.38 0.05 0.25 0.00 0.00 0.42 0.22 0.05 0
# Internal consistency for the non-judge facet at baseline
psych::alpha(m1[2:40][non_judge])
##
## Reliability analysis
## Call: psych::alpha(x = m1[2:40][non_judge])
##
## raw_alpha std.alpha G6(smc) average_r S/N ase mean sd median_r
## 0.94 0.94 0.94 0.67 16 0.0091 3.3 0.96 0.67
##
## lower alpha upper 95% confidence boundaries
## 0.92 0.94 0.96
##
## Reliability if an item is dropped:
## raw_alpha std.alpha G6(smc) average_r S/N alpha se var.r med.r
## FFMQ1.3 0.94 0.94 0.94 0.69 16 0.0096 0.0035 0.68
## FFMQ1.10 0.93 0.94 0.94 0.67 14 0.0103 0.0052 0.67
## FFMQ1.14 0.94 0.94 0.93 0.68 15 0.0101 0.0040 0.67
## FFMQ1.17 0.93 0.93 0.93 0.66 13 0.0110 0.0035 0.65
## FFMQ1.25 0.93 0.93 0.93 0.66 13 0.0110 0.0041 0.65
## FFMQ1.30 0.93 0.93 0.93 0.67 14 0.0106 0.0042 0.66
## FFMQ1.35 0.93 0.93 0.93 0.67 14 0.0104 0.0036 0.66
## FFMQ1.39 0.94 0.94 0.94 0.68 15 0.0101 0.0053 0.67
##
## Item statistics
## n raw.r std.r r.cor r.drop mean sd
## FFMQ1.3 95 0.78 0.78 0.74 0.72 3.1 1.1
## FFMQ1.10 95 0.84 0.84 0.81 0.78 3.1 1.1
## FFMQ1.14 95 0.83 0.83 0.80 0.77 3.6 1.1
## FFMQ1.17 95 0.89 0.89 0.88 0.85 3.1 1.2
## FFMQ1.25 95 0.88 0.89 0.87 0.85 3.1 1.1
## FFMQ1.30 95 0.86 0.86 0.84 0.81 3.5 1.1
## FFMQ1.35 95 0.85 0.84 0.83 0.79 3.4 1.2
## FFMQ1.39 95 0.83 0.83 0.79 0.77 3.4 1.2
##
## Non missing response frequency for each item
## 1 2 2.94736842105263 3 4 5 miss
## FFMQ1.3 0.06 0.25 0.00 0.34 0.23 0.12 0
## FFMQ1.10 0.07 0.24 0.00 0.36 0.21 0.12 0
## FFMQ1.14 0.02 0.15 0.00 0.36 0.16 0.32 0
## FFMQ1.17 0.04 0.35 0.00 0.28 0.17 0.16 0
## FFMQ1.25 0.03 0.29 0.00 0.35 0.18 0.15 0
## FFMQ1.30 0.02 0.16 0.01 0.32 0.25 0.24 0
## FFMQ1.35 0.04 0.19 0.00 0.31 0.22 0.24 0
## FFMQ1.39 0.03 0.25 0.00 0.24 0.20 0.27 0
# Internal consistency for the non-react facet at baseline
psych::alpha(m1[2:40][non_react])
##
## Reliability analysis
## Call: psych::alpha(x = m1[2:40][non_react])
##
## raw_alpha std.alpha G6(smc) average_r S/N ase mean sd median_r
## 0.77 0.78 0.78 0.33 3.5 0.036 2.7 0.55 0.32
##
## lower alpha upper 95% confidence boundaries
## 0.7 0.77 0.84
##
## Reliability if an item is dropped:
## raw_alpha std.alpha G6(smc) average_r S/N alpha se var.r med.r
## FFMQ1.4 0.77 0.77 0.77 0.36 3.4 0.037 0.018 0.39
## FFMQ1.9 0.72 0.73 0.73 0.31 2.7 0.045 0.023 0.28
## FFMQ1.19 0.74 0.75 0.73 0.33 2.9 0.041 0.015 0.30
## FFMQ1.21 0.75 0.76 0.76 0.34 3.1 0.040 0.021 0.34
## FFMQ1.24 0.78 0.78 0.78 0.38 3.6 0.036 0.017 0.39
## FFMQ1.29 0.71 0.71 0.71 0.29 2.5 0.047 0.015 0.28
## FFMQ1.33 0.73 0.73 0.73 0.31 2.7 0.043 0.018 0.32
##
## Item statistics
## n raw.r std.r r.cor r.drop mean sd
## FFMQ1.4 95 0.58 0.55 0.44 0.38 2.8 0.96
## FFMQ1.9 95 0.73 0.73 0.67 0.60 2.7 0.87
## FFMQ1.19 95 0.65 0.66 0.60 0.50 2.7 0.81
## FFMQ1.21 95 0.62 0.62 0.53 0.46 3.0 0.85
## FFMQ1.24 95 0.50 0.50 0.36 0.31 2.7 0.84
## FFMQ1.29 95 0.78 0.78 0.77 0.66 2.6 0.85
## FFMQ1.33 95 0.70 0.72 0.68 0.58 2.4 0.72
##
## Non missing response frequency for each item
## 1 2 3 3.2972972972973 3.68421052631579 3.73684210526316 4
## FFMQ1.4 0.08 0.33 0.37 0.00 0.00 0.00 0.19
## FFMQ1.9 0.03 0.47 0.31 0.00 0.00 0.00 0.17
## FFMQ1.19 0.05 0.37 0.42 0.01 0.01 0.00 0.13
## FFMQ1.21 0.00 0.28 0.44 0.00 0.00 0.00 0.22
## FFMQ1.24 0.06 0.37 0.39 0.00 0.00 0.01 0.17
## FFMQ1.29 0.07 0.38 0.40 0.00 0.00 0.00 0.14
## FFMQ1.33 0.07 0.55 0.32 0.00 0.00 0.00 0.06
## 5 miss
## FFMQ1.4 0.03 0
## FFMQ1.9 0.02 0
## FFMQ1.19 0.01 0
## FFMQ1.21 0.05 0
## FFMQ1.24 0.00 0
## FFMQ1.29 0.01 0
## FFMQ1.33 0.00 0
# Internal consistency for the observe facet at baseline
psych::alpha(m1[2:40][observe])
##
## Reliability analysis
## Call: psych::alpha(x = m1[2:40][observe])
##
## raw_alpha std.alpha G6(smc) average_r S/N ase mean sd median_r
## 0.82 0.82 0.84 0.37 4.7 0.028 3.2 0.69 0.35
##
## lower alpha upper 95% confidence boundaries
## 0.76 0.82 0.88
##
## Reliability if an item is dropped:
## raw_alpha std.alpha G6(smc) average_r S/N alpha se var.r med.r
## FFMQ1.1 0.80 0.80 0.82 0.37 4.1 0.032 0.019 0.35
## FFMQ1.6 0.78 0.79 0.80 0.35 3.7 0.034 0.016 0.33
## FFMQ1.11 0.82 0.82 0.82 0.40 4.6 0.028 0.013 0.41
## FFMQ1.15 0.79 0.79 0.80 0.35 3.8 0.034 0.016 0.35
## FFMQ1.20 0.80 0.81 0.81 0.38 4.2 0.031 0.013 0.35
## FFMQ1.26 0.80 0.80 0.81 0.37 4.1 0.032 0.017 0.35
## FFMQ1.31 0.79 0.80 0.81 0.36 3.9 0.033 0.017 0.35
## FFMQ1.36 0.81 0.82 0.82 0.39 4.5 0.030 0.016 0.41
##
## Item statistics
## n raw.r std.r r.cor r.drop mean sd
## FFMQ1.1 95 0.67 0.67 0.60 0.54 2.5 1.03
## FFMQ1.6 95 0.75 0.75 0.72 0.64 2.8 1.02
## FFMQ1.11 95 0.59 0.56 0.48 0.41 3.0 1.22
## FFMQ1.15 95 0.74 0.75 0.71 0.64 3.2 0.92
## FFMQ1.20 95 0.63 0.64 0.59 0.50 3.4 0.98
## FFMQ1.26 95 0.67 0.68 0.62 0.55 3.8 0.94
## FFMQ1.31 95 0.71 0.71 0.67 0.59 3.8 1.07
## FFMQ1.36 95 0.61 0.59 0.52 0.47 3.3 1.04
##
## Non missing response frequency for each item
## 1 2 3 3.2972972972973 4 5 miss
## FFMQ1.1 0.19 0.34 0.28 0.00 0.18 0.01 0
## FFMQ1.6 0.13 0.24 0.37 0.00 0.24 0.02 0
## FFMQ1.11 0.14 0.23 0.23 0.00 0.31 0.09 0
## FFMQ1.15 0.05 0.13 0.47 0.00 0.28 0.06 0
## FFMQ1.20 0.04 0.11 0.42 0.00 0.31 0.13 0
## FFMQ1.26 0.02 0.06 0.24 0.01 0.43 0.23 0
## FFMQ1.31 0.03 0.07 0.25 0.00 0.32 0.33 0
## FFMQ1.36 0.05 0.17 0.36 0.00 0.31 0.12 0
# Time 2
# 2.24 % of final data missing (999 is the missing-data code)
# fixed = TRUE so the "." in "FFMQ2." is matched literally, not as a regex wildcard
m2 <- iq_c_m[grep("FFMQ2.", names(iq_c_m), fixed = TRUE)]
(sum(m2 == 999, na.rm = TRUE) + sum(is.na(m2))) / prod(dim(m2)) * 100
## [1] 2.24359
# Recode the missing-data code to NA, then reverse-code the reverse-keyed items
m2[m2 == 999] <- NA
m2[paste0("FFMQ2.", rev_items)] <- 6 - m2[paste0("FFMQ2.", rev_items)]
m2 <- data.frame("ID" = iq_c$ID.ffmq, m2)
# Inspect rows with any missing item (printed below)
m2[which(is.na(rowSums(m2))), ]
## ID FFMQ2.1 FFMQ2.2 FFMQ2.3 FFMQ2.4 FFMQ2.5 FFMQ2.6 FFMQ2.7 FFMQ2.8 FFMQ2.9
## 30 34 3 3 5 3 3 1 2 3 3
## 38 44 4 3 3 3 4 NA 3 4 3
## 48 56 NA NA NA NA NA NA NA NA NA
## 50 58 NA NA NA NA NA NA NA NA NA
## 86 101 4 5 3 3 4 4 5 3 3
## 92 109 5 5 5 1 5 5 5 5 4
## 96 113 4 4 5 NA 2 4 3 3 4
## FFMQ2.10 FFMQ2.11 FFMQ2.12 FFMQ2.13 FFMQ2.14 FFMQ2.15 FFMQ2.16 FFMQ2.17
## 30 5 5 4 3 5 2 3 5
## 38 3 4 3 4 3 4 NA 3
## 48 NA NA NA NA NA NA NA NA
## 50 NA NA NA NA NA NA NA NA
## 86 4 4 5 3 4 5 5 4
## 92 5 4 5 5 5 5 5 5
## 96 5 5 4 3 5 5 3 4
## FFMQ2.18 FFMQ2.19 FFMQ2.20 FFMQ2.21 FFMQ2.22 FFMQ2.23 FFMQ2.24 FFMQ2.25
## 30 3 4 2 3 2 2 3 5
## 38 4 3 4 3 4 4 3 3
## 48 NA NA NA NA NA NA NA NA
## 50 NA NA NA NA NA NA NA NA
## 86 3 3 5 4 5 3 3 3
## 92 5 4 5 NA 5 5 4 5
## 96 3 4 5 4 4 4 4 5
## FFMQ2.26 FFMQ2.27 FFMQ2.28 FFMQ2.29 FFMQ2.30 FFMQ2.31 FFMQ2.32 FFMQ2.33
## 30 2 2 3 3 5 NA 1 3
## 38 3 3 4 3 3 4 3 3
## 48 NA NA NA NA NA NA NA NA
## 50 NA NA NA NA NA NA NA NA
## 86 5 5 3 3 5 5 5 2
## 92 5 5 5 4 5 5 5 4
## 96 5 3 3 4 5 5 4 5
## FFMQ2.34 FFMQ2.35 FFMQ2.36 FFMQ2.37 FFMQ2.38 FFMQ2.39
## 30 3 5 4 2 2 5
## 38 4 4 3 3 4 3
## 48 NA NA NA NA NA NA
## 50 NA NA NA NA NA NA
## 86 3 NA 5 5 3 4
## 92 5 5 5 5 5 5
## 96 3 5 5 3 3 5
ids_to_del <- c(56, 58)
# Delete participants 56 and 58, whose FFMQ responses are entirely missing at
# time 2 (see the printout above). (The original comment here incorrectly
# referred to participant 50, who was removed at time 1.)
m2 <- m2 %>% filter(!ID %in% ids_to_del)
length(which(is.na(m2))) / (dim(m2)[1] * dim(m2)[2]) * 100
## [1] 0.1595745
# For robustness check, casewise delete participants with any missing data.
# complete.cases() is safe when no rows have NAs; the original
# m2[-which(...), ] form would drop every row if which() returned integer(0).
m2_casewise_delete <- m2[complete.cases(m2), ]
# Replace any NAs with the mean of that participant's remaining items
for (r in seq_len(nrow(m2))) {
  if (any(is.na(m2[r, 2:40]))) {
    this_row <- as.numeric(unlist(m2[r, 2:40])) # Exclude the ID
    this_row[is.na(this_row)] <- mean(this_row, na.rm = TRUE)
    m2[r, 2:40] <- this_row
  }
}
#which(is.na(m2))
# Facet sum scores at time 2
m2$aware_sum <- rowSums(m2[2:40][aware])
m2$nonjudge_sum <- rowSums(m2[2:40][non_judge])
m2$nonreact_sum <- rowSums(m2[2:40][non_react])
m2$observe_sum <- rowSums(m2[2:40][observe])
m2_casewise_delete$aware_sum <- rowSums(m2_casewise_delete[2:40][aware])
m2_casewise_delete$nonjudge_sum <- rowSums(m2_casewise_delete[2:40][non_judge])
m2_casewise_delete$nonreact_sum <- rowSums(m2_casewise_delete[2:40][non_react])
# Added for consistency with the time-1 processing, which computes all four
# facet sums for the casewise-deleted data frame (observe_sum was missing here)
m2_casewise_delete$observe_sum <- rowSums(m2_casewise_delete[2:40][observe])
# Internal consistency for the awareness facet at time 2
psych::alpha(m2[2:40][aware])
##
## Reliability analysis
## Call: psych::alpha(x = m2[2:40][aware])
##
## raw_alpha std.alpha G6(smc) average_r S/N ase mean sd median_r
## 0.9 0.9 0.91 0.53 9.1 0.016 3.5 0.63 0.54
##
## lower alpha upper 95% confidence boundaries
## 0.87 0.9 0.93
##
## Reliability if an item is dropped:
## raw_alpha std.alpha G6(smc) average_r S/N alpha se var.r med.r
## FFMQ2.5 0.89 0.89 0.90 0.54 8.2 0.018 0.013 0.56
## FFMQ2.8 0.88 0.88 0.90 0.52 7.6 0.019 0.016 0.55
## FFMQ2.13 0.89 0.89 0.89 0.53 7.9 0.018 0.012 0.54
## FFMQ2.18 0.89 0.89 0.91 0.54 8.3 0.018 0.016 0.56
## FFMQ2.23 0.89 0.89 0.90 0.53 8.0 0.017 0.014 0.55
## FFMQ2.28 0.89 0.89 0.90 0.55 8.5 0.017 0.012 0.55
## FFMQ2.34 0.88 0.88 0.90 0.52 7.6 0.019 0.014 0.54
## FFMQ2.38 0.88 0.88 0.89 0.51 7.2 0.019 0.015 0.50
##
## Item statistics
## n raw.r std.r r.cor r.drop mean sd
## FFMQ2.5 94 0.75 0.73 0.70 0.66 3.2 0.87
## FFMQ2.8 94 0.80 0.80 0.76 0.73 3.6 0.83
## FFMQ2.13 94 0.80 0.77 0.75 0.70 3.4 0.99
## FFMQ2.18 94 0.74 0.73 0.67 0.64 3.4 0.84
## FFMQ2.23 94 0.74 0.76 0.72 0.66 3.6 0.84
## FFMQ2.28 94 0.68 0.71 0.66 0.60 3.5 0.70
## FFMQ2.34 94 0.79 0.81 0.78 0.73 3.6 0.73
## FFMQ2.38 94 0.84 0.84 0.83 0.78 3.5 0.80
##
## Non missing response frequency for each item
## 1 2 3 4 5 miss
## FFMQ2.5 0.03 0.15 0.46 0.31 0.05 0
## FFMQ2.8 0.02 0.03 0.38 0.44 0.13 0
## FFMQ2.13 0.03 0.15 0.38 0.31 0.13 0
## FFMQ2.18 0.01 0.10 0.43 0.37 0.10 0
## FFMQ2.23 0.00 0.09 0.40 0.37 0.14 0
## FFMQ2.28 0.00 0.04 0.52 0.36 0.07 0
## FFMQ2.34 0.00 0.04 0.46 0.40 0.10 0
## FFMQ2.38 0.00 0.09 0.44 0.37 0.11 0
# Internal consistency for the non-judge facet at time 2
psych::alpha(m2[2:40][non_judge])
##
## Reliability analysis
## Call: psych::alpha(x = m2[2:40][non_judge])
##
## raw_alpha std.alpha G6(smc) average_r S/N ase mean sd median_r
## 0.94 0.94 0.94 0.68 17 0.0087 4 0.83 0.68
##
## lower alpha upper 95% confidence boundaries
## 0.93 0.94 0.96
##
## Reliability if an item is dropped:
## raw_alpha std.alpha G6(smc) average_r S/N alpha se var.r med.r
## FFMQ2.3 0.93 0.94 0.93 0.67 15 0.0102 0.0039 0.67
## FFMQ2.10 0.94 0.94 0.93 0.68 15 0.0099 0.0026 0.69
## FFMQ2.14 0.94 0.94 0.94 0.70 16 0.0094 0.0021 0.69
## FFMQ2.17 0.94 0.94 0.93 0.68 15 0.0098 0.0031 0.69
## FFMQ2.25 0.93 0.93 0.93 0.66 14 0.0108 0.0019 0.66
## FFMQ2.30 0.94 0.94 0.94 0.68 15 0.0101 0.0038 0.68
## FFMQ2.35 0.93 0.94 0.93 0.67 14 0.0103 0.0039 0.66
## FFMQ2.39 0.94 0.94 0.94 0.68 15 0.0098 0.0030 0.68
##
## Item statistics
## n raw.r std.r r.cor r.drop mean sd
## FFMQ2.3 94 0.86 0.86 0.84 0.81 3.9 1.00
## FFMQ2.10 94 0.84 0.84 0.82 0.79 4.0 0.98
## FFMQ2.14 94 0.78 0.79 0.75 0.72 4.2 0.87
## FFMQ2.17 94 0.84 0.84 0.81 0.78 3.7 1.05
## FFMQ2.25 94 0.90 0.90 0.90 0.87 3.9 0.97
## FFMQ2.30 94 0.85 0.85 0.83 0.80 4.1 0.97
## FFMQ2.35 94 0.86 0.87 0.85 0.82 4.1 0.93
## FFMQ2.39 94 0.84 0.83 0.81 0.78 4.0 1.07
##
## Non missing response frequency for each item
## 1 2 3 3.94736842105263 4 5 miss
## FFMQ2.3 0.01 0.06 0.31 0.00 0.28 0.34 0
## FFMQ2.10 0.01 0.05 0.26 0.00 0.30 0.38 0
## FFMQ2.14 0.00 0.03 0.19 0.00 0.31 0.47 0
## FFMQ2.17 0.01 0.14 0.29 0.00 0.30 0.27 0
## FFMQ2.25 0.01 0.05 0.28 0.00 0.31 0.35 0
## FFMQ2.30 0.01 0.04 0.21 0.00 0.27 0.47 0
## FFMQ2.35 0.00 0.06 0.20 0.01 0.32 0.40 0
## FFMQ2.39 0.01 0.09 0.24 0.00 0.21 0.45 0
# Internal consistency for the non-react facet at time 2
psych::alpha(m2[2:40][non_react])
##
## Reliability analysis
## Call: psych::alpha(x = m2[2:40][non_react])
##
## raw_alpha std.alpha G6(smc) average_r S/N ase mean sd median_r
## 0.85 0.85 0.85 0.44 5.6 0.024 3.3 0.55 0.4
##
## lower alpha upper 95% confidence boundaries
## 0.8 0.85 0.89
##
## Reliability if an item is dropped:
## raw_alpha std.alpha G6(smc) average_r S/N alpha se var.r med.r
## FFMQ2.4 0.84 0.84 0.84 0.47 5.2 0.025 0.0124 0.48
## FFMQ2.9 0.82 0.82 0.82 0.43 4.6 0.028 0.0146 0.38
## FFMQ2.19 0.81 0.81 0.81 0.42 4.3 0.030 0.0116 0.38
## FFMQ2.21 0.85 0.85 0.84 0.48 5.5 0.025 0.0119 0.50
## FFMQ2.24 0.83 0.83 0.82 0.45 4.9 0.027 0.0102 0.41
## FFMQ2.29 0.82 0.82 0.81 0.43 4.6 0.029 0.0116 0.40
## FFMQ2.33 0.81 0.81 0.80 0.42 4.4 0.030 0.0092 0.38
##
## Item statistics
## n raw.r std.r r.cor r.drop mean sd
## FFMQ2.4 94 0.64 0.65 0.56 0.51 3.1 0.73
## FFMQ2.9 94 0.75 0.76 0.70 0.65 3.3 0.75
## FFMQ2.19 94 0.80 0.79 0.76 0.70 3.4 0.77
## FFMQ2.21 94 0.61 0.62 0.51 0.47 3.5 0.71
## FFMQ2.24 94 0.71 0.70 0.65 0.58 3.4 0.83
## FFMQ2.29 94 0.75 0.75 0.71 0.65 3.2 0.73
## FFMQ2.33 94 0.79 0.79 0.77 0.70 3.2 0.78
##
## Non missing response frequency for each item
## 1 2 3 4 4.05263157894737 4.73684210526316 5 miss
## FFMQ2.4 0.03 0.10 0.62 0.22 0.01 0.00 0.02 0
## FFMQ2.9 0.01 0.10 0.50 0.35 0.00 0.00 0.04 0
## FFMQ2.19 0.01 0.09 0.46 0.39 0.00 0.00 0.05 0
## FFMQ2.21 0.01 0.02 0.50 0.39 0.00 0.01 0.06 0
## FFMQ2.24 0.03 0.06 0.48 0.36 0.00 0.00 0.06 0
## FFMQ2.29 0.01 0.12 0.56 0.28 0.00 0.00 0.03 0
## FFMQ2.33 0.02 0.13 0.52 0.30 0.00 0.00 0.03 0
# Internal consistency for the observe facet at time 2
psych::alpha(m2[2:40][observe])
##
## Reliability analysis
## Call: psych::alpha(x = m2[2:40][observe])
##
## raw_alpha std.alpha G6(smc) average_r S/N ase mean sd median_r
## 0.89 0.9 0.9 0.52 8.7 0.016 3.8 0.73 0.51
##
## lower alpha upper 95% confidence boundaries
## 0.86 0.89 0.93
##
## Reliability if an item is dropped:
## raw_alpha std.alpha G6(smc) average_r S/N alpha se var.r med.r
## FFMQ2.1 0.89 0.89 0.90 0.53 8.0 0.018 0.016 0.51
## FFMQ2.6 0.88 0.88 0.89 0.51 7.3 0.020 0.016 0.51
## FFMQ2.11 0.89 0.90 0.90 0.55 8.6 0.017 0.014 0.57
## FFMQ2.15 0.87 0.87 0.87 0.49 6.7 0.021 0.012 0.50
## FFMQ2.20 0.87 0.87 0.88 0.50 6.9 0.020 0.011 0.50
## FFMQ2.26 0.88 0.88 0.89 0.52 7.7 0.018 0.014 0.51
## FFMQ2.31 0.88 0.88 0.89 0.52 7.5 0.019 0.014 0.51
## FFMQ2.36 0.89 0.89 0.89 0.54 8.1 0.018 0.016 0.52
##
## Item statistics
## n raw.r std.r r.cor r.drop mean sd
## FFMQ2.1 94 0.71 0.71 0.65 0.61 3.6 0.96
## FFMQ2.6 94 0.80 0.80 0.76 0.72 3.5 1.08
## FFMQ2.11 94 0.65 0.65 0.58 0.54 3.6 0.93
## FFMQ2.15 94 0.87 0.87 0.87 0.82 4.1 0.86
## FFMQ2.20 94 0.84 0.84 0.84 0.79 3.9 0.91
## FFMQ2.26 94 0.75 0.75 0.71 0.66 4.1 1.02
## FFMQ2.31 94 0.77 0.76 0.73 0.68 4.2 0.99
## FFMQ2.36 94 0.70 0.71 0.65 0.60 3.7 0.91
##
## Non missing response frequency for each item
## 1 2 3 3.21052631578947 3.40540540540541 4 5 miss
## FFMQ2.1 0.02 0.11 0.33 0.00 0.00 0.38 0.16 0
## FFMQ2.6 0.04 0.14 0.26 0.00 0.01 0.36 0.19 0
## FFMQ2.11 0.02 0.11 0.29 0.00 0.00 0.45 0.14 0
## FFMQ2.15 0.01 0.03 0.16 0.00 0.00 0.45 0.35 0
## FFMQ2.20 0.01 0.05 0.22 0.00 0.00 0.41 0.30 0
## FFMQ2.26 0.01 0.09 0.16 0.00 0.00 0.31 0.44 0
## FFMQ2.31 0.03 0.02 0.14 0.01 0.00 0.31 0.49 0
## FFMQ2.36 0.02 0.04 0.36 0.00 0.00 0.38 0.19 0
d1m is the MAP-estimate data frame created above; rename it to d1me
# Work on a copy of the MAP-estimate data frame under the name used below
d1me <- d1m
–Valence: Baseline effects–
Depression
Put MAP ests, DASS scores, and endorsement differences at baseline into the same dataframe
# Match the IDs in the qairre and map ests datasets
# (keep only participants present in both the questionnaire and MAP-estimate data)
dd1_red <- dd1 %>%
filter(ID %in% intersect(d1me$ID, dd1$ID)) %>% arrange(ID)
d1_red <- d1me %>% filter(ID %in% intersect(d1me$ID, dd1_red$ID)) %>% arrange(ID)
# Combine them
# NOTE(review): if the ID vectors ever fail to line up, fdf_depr is silently
# never created; consider stopifnot(all(d1_red$ID == dd1_red$ID)) instead.
# data.frame() auto-names the new column "dd1_red.dass_sum_score", which is
# relied on by name later in the file.
if (all(d1_red$ID == dd1_red$ID)) fdf_depr <- data.frame(d1_red, dd1_red$dass_sum_score)
Recalc baseline positive and negative endorsement diffs
# Per-participant mean endorsement (m) and mean RT (mrt) at baseline, by valence
nends_b <- bx_df %>% filter(session=="Pre", valence=="negative") %>%
group_by(subj_idx) %>% summarize(m=mean(response), mrt=mean(rt))
pends_b <- bx_df %>% filter(session=="Pre", valence=="positive") %>% group_by(subj_idx) %>%
summarize(m=mean(response), mrt=mean(rt))
# Positive-minus-negative differences (endorsement rate and mean RT)
if (all(pends_b$subj_idx==nends_b$subj_idx)) {
pends_b$pn_diff <- pends_b$m-nends_b$m
pends_b$pn_rt_diff <- pends_b$mrt-nends_b$mrt
}
# Match and combine the endorsements dataset with the depression one
ends_sub <- pends_b %>% filter(subj_idx %in% fdf_depr$ID)
# Add the difference in positive-negative endorsements to the depression dataset
if (all(ends_sub$subj_idx==fdf_depr$ID)) fdf_depr <- data.frame(fdf_depr, "pos_neg_ends_diff"=ends_sub$pn_diff)
# Overall mean RT per participant.
# NOTE(review): m and mrt are BOTH mean(rt) here — m was possibly intended to
# be mean(response) as in the summaries above; confirm before relying on rt$m.
rt <- bx_df %>%
group_by(subj_idx) %>% summarize(m=mean(rt), mrt=mean(rt))
# Mean RT per participant and valence
rt_val <- bx_df %>%
group_by(subj_idx, valence) %>% summarize(mrt=mean(rt))
## `summarise()` has grouped output by 'subj_idx'. You can override using the
## `.groups` argument.
# Positive-minus-negative RT difference per participant.
# NOTE(review): assumes unique(rt_val$subj_idx) is ordered the same way as the
# valence-filtered rows — true for grouped summarize output, but fragile.
rt_val_short <- data.frame(
"ID"=unique(rt_val$subj_idx),
"overall_rt_diff"=rt_val[rt_val$valence == "positive", "mrt"]-rt_val[rt_val$valence == "negative", "mrt"])
# Post-session endorsement and RT summaries by valence
nends_p <- bx_df %>% filter(session=="Post", valence=="negative") %>%
group_by(subj_idx) %>% summarize(m=mean(response), mrt=mean(rt))
pends_p <- bx_df %>% filter(session=="Post", valence=="positive") %>% group_by(subj_idx) %>% summarize(m=mean(response), mrt=mean(rt))
# Post-session positive-minus-negative differences
if (all(pends_p$subj_idx==nends_p$subj_idx)) {
pends_p$pn_diff <- pends_p$m-nends_p$m
pends_p$pn_rt_diff <- pends_p$mrt-nends_p$mrt
}
Depression relationship
# Baseline depression (DASS sum score) vs. positive-minus-negative drift rate
cor.test(fdf_depr$dd1_red.dass_sum_score, fdf_depr$v_val_ctr_)
##
## Pearson's product-moment correlation
##
## data: fdf_depr$dd1_red.dass_sum_score and fdf_depr$v_val_ctr_
## t = -4.0199, df = 93, p-value = 0.0001182
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.5441034 -0.1986105
## sample estimates:
## cor
## -0.3847524
# Baseline depression vs. raw positive-minus-negative endorsement difference
cor.test(fdf_depr$dd1_red.dass_sum_score, fdf_depr$pos_neg_ends_diff)
##
## Pearson's product-moment correlation
##
## data: fdf_depr$dd1_red.dass_sum_score and fdf_depr$pos_neg_ends_diff
## t = -4.4162, df = 93, p-value = 2.709e-05
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.5700670 -0.2344955
## sample estimates:
## cor
## -0.4163609
Figure 4B, right
# Figure 4B (right): baseline depression vs. positive>negative drift rate.
# The original accidentally added geom_smooth() and the ga theme object twice,
# and set theme(plot.title=...) twice (only the last call takes effect);
# the duplicate layers and the overridden theme call are removed here.
dvv <- ggplot(fdf_depr, aes(v_val_ctr_, dd1_red.dass_sum_score)) +
  geom_point(size=8, alpha=1, pch=21, fill="gray57") +
  #ggtitle(paste0( "r=-.30 p < .001 ")) +
  geom_smooth(method="lm", color="black") +
  ga + ap +
  theme(axis.title = element_text(size=35), axis.text=element_text(size=35)) +
  ylab("baseline depression \n (via DASS)") + xlab("positive > negative \n drift rate estimates") +
  theme(plot.title = element_text(margin=margin(b=5),
  size=27, hjust=.15, vjust=.6)) +
  geom_text(x=2.1, y=24.5, size=15, label="*****") + ylim(0, 37)
dvv
## `geom_smooth()` using formula 'y ~ x'
#ggsave("../paper/figs/depr_v-val.png", dvv, width=9, height=7)
Mindfulness
# Match the IDs in the qairre and map ests datasets
# (keep only participants present in both the FFMQ and MAP-estimate data)
m1_r <- m1 %>%
filter(ID %in% intersect(d1me$ID, m1$ID)) %>% arrange(ID)
d1_r <- d1me %>% filter(ID %in% intersect(d1me$ID, m1_r$ID)) %>% arrange(ID)
# Combine them
# data.frame() auto-names the new columns "m1_r.aware_sum" etc., which are
# relied on by name in the regression formulas below
if (all(d1_r$ID == m1_r$ID)) fdf <- data.frame(d1_r,
m1_r$aware_sum, m1_r$nonjudge_sum, m1_r$nonreact_sum, m1_r$observe_sum)
Add the endorsement difference variable
# Baseline positive-minus-negative endorsement difference for matched IDs
ends_sub_m <- pends_b %>% filter(subj_idx %in% m1_r$ID)
if (all(m1_r$ID == ends_sub_m$subj_idx)) fdf <- data.frame(fdf, "pos_neg_diff_m"=ends_sub_m$pn_diff)
# Drift-rate valence effect regressed on the standardized mindfulness facets
# (aware x non-judge interaction plus non-react main effect)
summary(
md_ddm_1 <- lm(v_val_ctr_ ~
scale(m1_r.aware_sum)*scale(m1_r.nonjudge_sum) +
scale(m1_r.nonreact_sum) ,
data=fdf)
)
##
## Call:
## lm(formula = v_val_ctr_ ~ scale(m1_r.aware_sum) * scale(m1_r.nonjudge_sum) +
## scale(m1_r.nonreact_sum), data = fdf)
##
## Residuals:
## Min 1Q Median 3Q Max
## -0.70168 -0.26863 -0.02867 0.23972 0.91191
##
## Coefficients:
## Estimate Std. Error t value
## (Intercept) 1.322348 0.039716 33.295
## scale(m1_r.aware_sum) 0.104288 0.041960 2.485
## scale(m1_r.nonjudge_sum) 0.050452 0.045052 1.120
## scale(m1_r.nonreact_sum) 0.019338 0.044843 0.431
## scale(m1_r.aware_sum):scale(m1_r.nonjudge_sum) 0.004105 0.036826 0.111
## Pr(>|t|)
## (Intercept) <2e-16 ***
## scale(m1_r.aware_sum) 0.0148 *
## scale(m1_r.nonjudge_sum) 0.2658
## scale(m1_r.nonreact_sum) 0.6673
## scale(m1_r.aware_sum):scale(m1_r.nonjudge_sum) 0.9115
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.359 on 90 degrees of freedom
## Multiple R-squared: 0.1432, Adjusted R-squared: 0.1052
## F-statistic: 3.761 on 4 and 90 DF, p-value: 0.007092
# Variance inflation factors for md_ddm_1 (all < 2, so multicollinearity is low)
car::vif(md_ddm_1)
## scale(m1_r.aware_sum)
## 1.284368
## scale(m1_r.nonjudge_sum)
## 1.480637
## scale(m1_r.nonreact_sum)
## 1.466887
## scale(m1_r.aware_sum):scale(m1_r.nonjudge_sum)
## 1.164014
Compare to raw endorsement difference as outcome var
# Same facet regression but with the raw endorsement difference as the outcome
summary(
md_raw_1 <- lm(pos_neg_diff_m ~
scale(m1_r.aware_sum)*scale(m1_r.nonjudge_sum) +
scale(m1_r.nonreact_sum) ,
data=fdf)
)
##
## Call:
## lm(formula = pos_neg_diff_m ~ scale(m1_r.aware_sum) * scale(m1_r.nonjudge_sum) +
## scale(m1_r.nonreact_sum), data = fdf)
##
## Residuals:
## Min 1Q Median 3Q Max
## -0.54962 -0.08583 -0.00330 0.14273 0.32519
##
## Coefficients:
## Estimate Std. Error t value
## (Intercept) 0.714502 0.019190 37.233
## scale(m1_r.aware_sum) 0.063934 0.020275 3.153
## scale(m1_r.nonjudge_sum) 0.023046 0.021769 1.059
## scale(m1_r.nonreact_sum) 0.001903 0.021667 0.088
## scale(m1_r.aware_sum):scale(m1_r.nonjudge_sum) -0.008999 0.017794 -0.506
## Pr(>|t|)
## (Intercept) < 2e-16 ***
## scale(m1_r.aware_sum) 0.00219 **
## scale(m1_r.nonjudge_sum) 0.29259
## scale(m1_r.nonreact_sum) 0.93020
## scale(m1_r.aware_sum):scale(m1_r.nonjudge_sum) 0.61429
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.1734 on 90 degrees of freedom
## Multiple R-squared: 0.1684, Adjusted R-squared: 0.1314
## F-statistic: 4.556 on 4 and 90 DF, p-value: 0.002135
# Variance inflation factors for md_raw_1 (all < 2, so multicollinearity is low)
car::vif(md_raw_1)
## scale(m1_r.aware_sum)
## 1.284368
## scale(m1_r.nonjudge_sum)
## 1.480637
## scale(m1_r.nonreact_sum)
## 1.466887
## scale(m1_r.aware_sum):scale(m1_r.nonjudge_sum)
## 1.164014
# Build a small table of the md_ddm_1 coefficients (rows 2:5 skip the
# intercept) with their standard errors, for plotting
out <- summary(md_ddm_1)
reg_df <- data.table(c("aware", "non-judge", "non-react", "aware*non-judge"),
out$coefficients[2:5, 1],
out$coefficients[2:5, 2]) %>% setNames(c("coefficient", "estimate", "se"))
# Fix the x-axis ordering of the coefficients
reg_df$coefficient <- factor(reg_df$coefficient, levels=c("aware", "non-judge", "non-react", "aware*non-judge"))
# Coefficient plot: point estimates with +/- 1 SEM error bars
mr <- ggplot(reg_df, aes(x=coefficient, y=estimate)) +
geom_errorbar(aes(ymin=estimate-se, ymax=estimate+se), size=2, width=0) +
geom_point(fill="gray57", color="black", alpha=.8, size=8, pch=21) +
geom_hline(yintercept=c(0), linetype="dotted") +
theme(axis.text.x = element_text(angle=25, hjust=1)) +
ga + ap + ylab("coefficients (±1 SEM)") + xlab("") +
ggtitle("Regression coefficients \n of pos > neg drift rate ~ FFMQ mindfulness facets") +
theme(plot.title = element_text(margin=margin(b=5),
size=27, hjust=.15, vjust=.6)) +
geom_text(x=1, y=.17, size=15, label="*") + ylim(-.05, .18) +
theme(axis.text = element_text(size=30),
axis.title = element_text(size=30))
mr
#ggsave("../paper/figs/mindful_regr.png", mr, width=13, height=6)
Adding observe
# Facet regression with the observe facet added as a main effect
summary(
md_wobs_1 <- lm(v_val_ctr_ ~
scale(m1_r.aware_sum)*scale(m1_r.nonjudge_sum) +
scale(m1_r.nonreact_sum) + scale(m1_r.observe_sum),
data=fdf)
)
##
## Call:
## lm(formula = v_val_ctr_ ~ scale(m1_r.aware_sum) * scale(m1_r.nonjudge_sum) +
## scale(m1_r.nonreact_sum) + scale(m1_r.observe_sum), data = fdf)
##
## Residuals:
## Min 1Q Median 3Q Max
## -0.70167 -0.26870 -0.02481 0.24000 0.90945
##
## Coefficients:
## Estimate Std. Error t value
## (Intercept) 1.322071 0.040042 33.017
## scale(m1_r.aware_sum) 0.104159 0.042215 2.467
## scale(m1_r.nonjudge_sum) 0.051018 0.045690 1.117
## scale(m1_r.nonreact_sum) 0.018006 0.047208 0.381
## scale(m1_r.observe_sum) 0.003742 0.039247 0.095
## scale(m1_r.aware_sum):scale(m1_r.nonjudge_sum) 0.004791 0.037723 0.127
## Pr(>|t|)
## (Intercept) <2e-16 ***
## scale(m1_r.aware_sum) 0.0155 *
## scale(m1_r.nonjudge_sum) 0.2672
## scale(m1_r.nonreact_sum) 0.7038
## scale(m1_r.observe_sum) 0.9243
## scale(m1_r.aware_sum):scale(m1_r.nonjudge_sum) 0.8992
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.361 on 89 degrees of freedom
## Multiple R-squared: 0.1433, Adjusted R-squared: 0.09519
## F-statistic: 2.978 on 5 and 89 DF, p-value: 0.01564
# Variance inflation factors for md_wobs_1 (all < 2, so multicollinearity is low)
car::vif(md_wobs_1)
## scale(m1_r.aware_sum)
## 1.285691
## scale(m1_r.nonjudge_sum)
## 1.506095
## scale(m1_r.nonreact_sum)
## 1.607810
## scale(m1_r.observe_sum)
## 1.111253
## scale(m1_r.aware_sum):scale(m1_r.nonjudge_sum)
## 1.207987
# As above, additionally allowing an observe x non-judge interaction
summary(
md_wobs_2 <- lm(v_val_ctr_ ~
scale(m1_r.aware_sum)*scale(m1_r.nonjudge_sum) +
scale(m1_r.nonreact_sum) + scale(m1_r.observe_sum)*scale(m1_r.nonjudge_sum),
data=fdf)
)
##
## Call:
## lm(formula = v_val_ctr_ ~ scale(m1_r.aware_sum) * scale(m1_r.nonjudge_sum) +
## scale(m1_r.nonreact_sum) + scale(m1_r.observe_sum) * scale(m1_r.nonjudge_sum),
## data = fdf)
##
## Residuals:
## Min 1Q Median 3Q Max
## -0.69522 -0.27415 -0.03104 0.23719 0.89794
##
## Coefficients:
## Estimate Std. Error t value
## (Intercept) 1.319376 0.040230 32.796
## scale(m1_r.aware_sum) 0.108939 0.042654 2.554
## scale(m1_r.nonjudge_sum) 0.055135 0.046018 1.198
## scale(m1_r.nonreact_sum) 0.006982 0.049032 0.142
## scale(m1_r.observe_sum) -0.004312 0.040437 -0.107
## scale(m1_r.aware_sum):scale(m1_r.nonjudge_sum) 0.008756 0.038070 0.230
## scale(m1_r.nonjudge_sum):scale(m1_r.observe_sum) 0.030466 0.035884 0.849
## Pr(>|t|)
## (Intercept) <2e-16 ***
## scale(m1_r.aware_sum) 0.0124 *
## scale(m1_r.nonjudge_sum) 0.2341
## scale(m1_r.nonreact_sum) 0.8871
## scale(m1_r.observe_sum) 0.9153
## scale(m1_r.aware_sum):scale(m1_r.nonjudge_sum) 0.8186
## scale(m1_r.nonjudge_sum):scale(m1_r.observe_sum) 0.3982
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.3615 on 88 degrees of freedom
## Multiple R-squared: 0.1503, Adjusted R-squared: 0.09234
## F-statistic: 2.594 on 6 and 88 DF, p-value: 0.02317
# Variance inflation factors for md_wobs_2 (all < 2, so multicollinearity is low)
car::vif(md_wobs_2)
## scale(m1_r.aware_sum)
## 1.308487
## scale(m1_r.nonjudge_sum)
## 1.523001
## scale(m1_r.nonreact_sum)
## 1.729043
## scale(m1_r.observe_sum)
## 1.175969
## scale(m1_r.aware_sum):scale(m1_r.nonjudge_sum)
## 1.226443
## scale(m1_r.nonjudge_sum):scale(m1_r.observe_sum)
## 1.202048
Robustness check: Confirm effects are same with casewise deletion
# Match the IDs in the qairre and map ests datasets
# (robustness check: repeat the facet regression on the casewise-deleted data)
m1_r_cd <- m1_casewise_delete %>%
filter(ID %in% intersect(d1me$ID, m1_casewise_delete$ID)) %>% arrange(ID)
d1_r_cd <- d1me %>% filter(ID %in% intersect(d1me$ID, m1_casewise_delete$ID)) %>% arrange(ID)
# Combine them
# (columns are referenced by their auto-generated names, e.g. m1_r_cd.aware_sum,
# so the listing order below does not matter)
if (all(d1_r_cd$ID == m1_r_cd$ID)) fdf_cd <- data.frame(d1_r_cd,
m1_r_cd$aware_sum,
m1_r_cd$nonreact_sum,
m1_r_cd$nonjudge_sum)
# Same model as md_ddm_1, fit to the casewise-deleted sample
summary(
md_d_1_cd <- lm(v_val_ctr_ ~
scale(m1_r_cd.aware_sum)*scale(m1_r_cd.nonjudge_sum) +
scale(m1_r_cd.nonreact_sum),
data=fdf_cd)
)
##
## Call:
## lm(formula = v_val_ctr_ ~ scale(m1_r_cd.aware_sum) * scale(m1_r_cd.nonjudge_sum) +
## scale(m1_r_cd.nonreact_sum), data = fdf_cd)
##
## Residuals:
## Min 1Q Median 3Q Max
## -0.74755 -0.24873 -0.02561 0.22460 0.87001
##
## Coefficients:
## Estimate Std. Error
## (Intercept) 1.365236 0.040250
## scale(m1_r_cd.aware_sum) 0.101092 0.042752
## scale(m1_r_cd.nonjudge_sum) 0.045756 0.045607
## scale(m1_r_cd.nonreact_sum) 0.037034 0.046448
## scale(m1_r_cd.aware_sum):scale(m1_r_cd.nonjudge_sum) -0.006047 0.037442
## t value Pr(>|t|)
## (Intercept) 33.919 <2e-16 ***
## scale(m1_r_cd.aware_sum) 2.365 0.0204 *
## scale(m1_r_cd.nonjudge_sum) 1.003 0.3187
## scale(m1_r_cd.nonreact_sum) 0.797 0.4276
## scale(m1_r_cd.aware_sum):scale(m1_r_cd.nonjudge_sum) -0.162 0.8721
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.3487 on 81 degrees of freedom
## Multiple R-squared: 0.1557, Adjusted R-squared: 0.114
## F-statistic: 3.735 on 4 and 81 DF, p-value: 0.007702
# Variance inflation factors for md_d_1_cd (all < 2, so multicollinearity is low)
car::vif(md_d_1_cd)
## scale(m1_r_cd.aware_sum)
## 1.277384
## scale(m1_r_cd.nonjudge_sum)
## 1.453664
## scale(m1_r_cd.nonreact_sum)
## 1.507804
## scale(m1_r_cd.aware_sum):scale(m1_r_cd.nonjudge_sum)
## 1.170969
# Robustness check: depression - drift-rate correlation in the casewise-deleted DASS sample.
# NOTE(review): this rebinds d1_r, shadowing the MAP-estimate subset created
# earlier for the mindfulness merge — consider a distinct name.
fd_r <- fdf %>% filter(ID %in% dd1_casewise_delete$ID) #%>% select(v_val_ctr_)
d1_r <- dd1_casewise_delete %>% filter(ID %in% fd_r$ID)
if (all(d1_r$ID==fd_r$ID)) cor.test(fd_r$v_val_ctr_, d1_r$dass_sum_score)
##
## Pearson's product-moment correlation
##
## data: fd_r$v_val_ctr_ and d1_r$dass_sum_score
## t = -3.387, df = 90, p-value = 0.00105
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.5061892 -0.1411328
## sample estimates:
## cor
## -0.336231
Mindfulness and depression in same
# Add the DASS sum score to the mindfulness data frame (IDs must already match)
if (all(fdf$ID == fdf_depr$ID)) fdf <- data.frame(fdf, fdf_depr$dd1_red.dass_sum_score)
Depression relationship
# Sanity check
#cor.test(fdf$fdf_depr.dd1_red.dass_sum_score, fdf$v_val_ctr_)
# Baseline depression vs. the acting-with-awareness facet
cor.test(fdf$fdf_depr.dd1_red.dass_sum_score, fdf$m1_r.aware_sum)
##
## Pearson's product-moment correlation
##
## data: fdf$fdf_depr.dd1_red.dass_sum_score and fdf$m1_r.aware_sum
## t = -4.4046, df = 93, p-value = 2.832e-05
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.5693236 -0.2334552
## sample estimates:
## cor
## -0.4154507
Check other mindfulness facet - depression relationships
- Comparably strong relationship with non-judgment but weaker with non-react
# Baseline depression vs. FFMQ non-judge facet
cor.test(fdf$fdf_depr.dd1_red.dass_sum_score, fdf$m1_r.nonjudge_sum)
##
## Pearson's product-moment correlation
##
## data: fdf$fdf_depr.dd1_red.dass_sum_score and fdf$m1_r.nonjudge_sum
## t = -4.0497, df = 93, p-value = 0.0001061
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.5461076 -0.2013479
## sample estimates:
## cor
## -0.3871791
# Baseline depression vs. FFMQ non-react facet
cor.test(fdf$fdf_depr.dd1_red.dass_sum_score, fdf$m1_r.nonreact_sum)
##
## Pearson's product-moment correlation
##
## data: fdf$fdf_depr.dd1_red.dass_sum_score and fdf$m1_r.nonreact_sum
## t = -1.4622, df = 93, p-value = 0.1471
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.3411428 0.0532451
## sample estimates:
## cor
## -0.1499066
# Full baseline model: drift-rate regressor on standardized mindfulness facets
# (aware x non-judge interaction + non-react) with depression as covariate
summary(
all_m <- lm(v_val_ctr_ ~
scale(m1_r.aware_sum)*scale(m1_r.nonjudge_sum) +
scale(m1_r.nonreact_sum) + scale(fdf_depr.dd1_red.dass_sum_score),
data=fdf)
)
##
## Call:
## lm(formula = v_val_ctr_ ~ scale(m1_r.aware_sum) * scale(m1_r.nonjudge_sum) +
## scale(m1_r.nonreact_sum) + scale(fdf_depr.dd1_red.dass_sum_score),
## data = fdf)
##
## Residuals:
## Min 1Q Median 3Q Max
## -0.7122 -0.2505 -0.0240 0.2466 0.8261
##
## Coefficients:
## Estimate Std. Error t value
## (Intercept) 1.31119 0.03862 33.955
## scale(m1_r.aware_sum) 0.06121 0.04358 1.405
## scale(m1_r.nonjudge_sum) 0.02399 0.04464 0.537
## scale(m1_r.nonreact_sum) 0.02105 0.04335 0.485
## scale(fdf_depr.dd1_red.dass_sum_score) -0.11530 0.04266 -2.703
## scale(m1_r.aware_sum):scale(m1_r.nonjudge_sum) 0.03175 0.03704 0.857
## Pr(>|t|)
## (Intercept) < 2e-16 ***
## scale(m1_r.aware_sum) 0.16362
## scale(m1_r.nonjudge_sum) 0.59239
## scale(m1_r.nonreact_sum) 0.62855
## scale(fdf_depr.dd1_red.dass_sum_score) 0.00823 **
## scale(m1_r.aware_sum):scale(m1_r.nonjudge_sum) 0.39365
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.347 on 89 degrees of freedom
## Multiple R-squared: 0.2082, Adjusted R-squared: 0.1637
## F-statistic: 4.681 on 5 and 89 DF, p-value: 0.0007688
# Multicollinearity check for the full baseline model
car::vif(all_m)
## scale(m1_r.aware_sum)
## 1.482614
## scale(m1_r.nonjudge_sum)
## 1.555482
## scale(m1_r.nonreact_sum)
## 1.467199
## scale(fdf_depr.dd1_red.dass_sum_score)
## 1.420407
## scale(m1_r.aware_sum):scale(m1_r.nonjudge_sum)
## 1.260104
Univariate
# Univariate: drift-rate regressor vs. aware facet at baseline
cor.test(fdf$v_val_ctr_, fdf$m1_r.aware_sum)
##
## Pearson's product-moment correlation
##
## data: fdf$v_val_ctr_ and fdf$m1_r.aware_sum
## t = 3.5931, df = 93, p-value = 0.0005249
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.1587662 0.5144785
## sample estimates:
## cor
## 0.3491374
Add dosage for time
# big_df <- haven::read_sav("./../../data/raw_files/K23_Hitchcock_medpracticedata.sav")
# big_df <- big_df %>% filter(ID %in% fdf$ID) %>% arrange(ID)
# fdf_md <- fdf %>% filter(ID %in% big_df$ID) %>% arrange(ID)
# if (all(fdf_md$ID==big_df$ID)) cor.test(fdf_md$v_val_ctr.sess_ctr_, big_df$TOTALminpwerwk.8wks)
Recreate post - pre scores
# Post scores
# Mean endorsement (response) and mean RT per subject at post, by word valence
nends_p <- bx_df %>% filter(session=="Post", valence=="negative") %>%
group_by(subj_idx) %>% summarize(m=mean(response), mrt=mean(rt))
pends_p <- bx_df %>% filter(session=="Post", valence=="positive") %>% group_by(subj_idx) %>% summarize(m=mean(response), mrt=mean(rt))
# Positive-minus-negative endorsement difference at post (rows must align by subject)
if (all(pends_p$subj_idx==nends_p$subj_idx)) {
pends_p$pn_diff <- pends_p$m-nends_p$m
}
Create a difference variable of (post pos - neg) - (pre pos - neg)
# Difference-in-differences: (post pos-neg) minus (pre pos-neg); pends_b is the pre-session analogue computed earlier in the file
if (all(pends_p$subj_idx==pends_b$subj_idx)) pends_p$diff_in_diff_pn_ends <- pends_p$pn_diff - pends_b$pn_diff
We are just looking at FFMQ and depression at pre- and post-intervention and want to relate these to the SRET valence*time regressor. Had originally tried mixed-effects models to try to get time REs, but had convergence issues which make sense given low data per pt, so opting for simpler approach of change scores
# Post-minus-pre FFMQ facet change scores, restricted to IDs present at both sessions
m1_shared <- m1 %>% filter(ID %in% m2$ID)
m2_shared <- m2 %>% filter(ID %in% m1$ID)
if (all(m1_shared$ID==m2_shared$ID)) {
mindfulness_changes <- data.frame(m1_shared$ID,
m2_shared$aware_sum-m1_shared$aware_sum,
m2_shared$nonjudge_sum-m1_shared$nonjudge_sum,
m2_shared$nonreact_sum-m1_shared$nonreact_sum,
m2_shared$observe_sum-m1_shared$observe_sum) %>%
setNames(c("ID", "aware_change", "nj_change", "nr_change", "obs_change"))
}
# Eyeball the change-score distributions
hist(mindfulness_changes$nr_change)
hist(mindfulness_changes$nj_change)
hist(mindfulness_changes$aware_change)
hist(mindfulness_changes$obs_change)
#hist(fdf$FFMQ_Aware_SUM_Q1)
#hist(fdf$FFMQ_Nonjudge_SUM_Q1)
# Baseline distributions of aware (top) and non-judge (bottom) facets;
# ga / ap are theme objects defined earlier in the file (not visible here)
a <- ggplot(fdf, aes(x=m1_r.aware_sum)) +
geom_histogram(bins=25, fill="gray38", color="black") +
xlim(5, 45) + ga + theme(axis.text=element_blank(), axis.ticks = element_blank()) +
ylab("") + xlab("") + ggtitle("Distributions of Mindfulness Facets at Baseline") +
theme(plot.title = element_text(margin=margin(b=-5),
size=35, hjust=.15, vjust=.6)) +
annotate("text", x = 40, y = 10, label = "Aware", size=10)
# annotate("text", x = 2.45, y = .85, label = TeX("yellow: $\\alpha$ < median"), size=13)
b <- ggplot(fdf, aes(x=m1_r.nonjudge_sum)) +
geom_histogram(bins=25, fill="gray38", color="black") +
xlim(5, 45) + ga + ap + ylab("") + xlab("") +
theme(axis.text.y=element_blank(), axis.ticks.y = element_blank()) +
annotate("text", x = 35, y = 10, label = "Non-judgmental", size=10)
# Stack with patchwork
comb_m <- a/b
comb_m
## Warning: Removed 2 rows containing missing values (geom_bar).
## Removed 2 rows containing missing values (geom_bar).
#ggsave("../paper/figs/mind_dist.png", comb_m, width=18, height=8)
# mindfulness_changes$aware_change
# mindfulness_changes$aware_change
# range(mindfulness_changes$aware_change)
# range(mindfulness_changes$nj_change)
# Distributions of post-minus-pre change in aware (top) and non-judge (bottom)
c <- ggplot(mindfulness_changes, aes(x=aware_change)) +
geom_histogram(bins=25, fill="gray75", color="black") +
ga + theme(axis.text=element_blank(), axis.ticks = element_blank()) +
ylab("") + xlab("") + ggtitle("Distributions of Change (Post - Pre) in Mindfulness Facets") +
theme(plot.title = element_text(margin=margin(b=-5),
size=35, hjust=.15, vjust=.6)) +
xlim(-14, 27) +
annotate("text", x = 17, y = 11, label = "Aware", size=10)
# annotate("text", x = 2.45, y = .85, label = TeX("yellow: $\\alpha$ < median"), size=13)
d <- ggplot(mindfulness_changes, aes(x=nj_change)) +
geom_histogram(bins=25, fill="gray75", color="black") +
ga + ap + ylab("") + xlab("") +
theme(axis.text.y=element_blank(), axis.ticks.y = element_blank()) +
xlim(-14, 27) +
annotate("text", x = 17, y = 15, label = "Non-judgmental", size=10)
# Stack with patchwork
comb_ch <- c/d
comb_ch
## Warning: Removed 2 rows containing missing values (geom_bar).
## Removed 2 rows containing missing values (geom_bar).
#ggsave("../paper/figs/mind_ch.png", comb_ch, width=18, height=8)
# Keep only participants who have both model estimates (d1me) and mindfulness change scores
d1me_shared <- d1me %>% filter(ID %in% mindfulness_changes$ID)
Examining how the machine-learning procedure from Beevers et al. (2019, JAP) influences sparsity. The model draws on code included in that paper.
# Readable labels for the 14 retained DASS depression items (DASS item numbers
# noted inline) plus the drift-rate regressor used as the outcome below.
var_names <- c(
  # DASS items 3, 5, 10, 13
  "no-positive-feelings", "couldnt-get-going", "easily-upset", "sad-and-depressed",
  # DASS items 16, 17, 21
  "lost-interest", "not-worth-much-as-person", "life-not-worthwhile",
  # DASS items 24, 26, 31, 34, 37
  "no-enjoyment", "down-hearted-and-blue", "no-enthusiasm", "worthless", "no-hope",
  # DASS items 38, 42
  "life-meaningless", "no-initiative",
  # outcome variable
  "drift_regressor"
)
Empirical
# Combine depression items with the drift-rate regressor (ID-alignment guarded)
if (all(d1_red$ID == dd1_red$ID)) {
t1_depr_combined <- data.frame(dd1_red, "v_val_ctr"=d1_red$v_val_ctr_)
}
#cor.test(t1_depr_combined$v_val_ctr, t1_depr_combined$dass_sum_score)
# Correlation matrix (excluding ID and sum score) — reused by the commented
# simulation below as the target correlation structure
cmat <- cor(t1_depr_combined %>% select(-c(ID, dass_sum_score)))
#cmat
# Item-level DASS columns + drift regressor, relabeled with var_names
just_rel_vars_emp <- cbind(t1_depr_combined[grep("DASS1", names(t1_depr_combined))], t1_depr_combined$v_val_ctr) %>%
setNames(var_names)
# Random forest (beset) predicting the drift regressor from the items
rf_out_emp <- beset_rf(drift_regressor ~ ., data=just_rel_vars_emp)
importance(rf_out_emp)
To examine the influence of sample size
(commented because time consuming)
# sample_sizes <- c(100, 200, 500, 1e3, 5e3)
#
#
# for (ss in seq_along(sample_sizes)) {
# simulated_data <- rnorm_multi(n=sample_sizes[ss], vars=ncol(cmat), mu=0, sd=1, r=cmat)
# # Impose constraints on the depression items (round to integers, clamp to 0-3)
# # NOTE(review): the original indexed simulated_data[1:14, 1:14], which constrains
# # only the first 14 ROWS; the depression items are the first 14 COLUMNS for all rows.
# depr_items <- round(simulated_data[, 1:14], 0)
# depr_items[depr_items < 0] <- 0
# depr_items[depr_items > 3] <- 3
#
# # Put them back in
# simulated_data[, 1:14] <- depr_items
# simulated_data <- simulated_data %>% setNames(var_names)
#
# rf_out <- beset_rf(drift_regressor ~ ., data=simulated_data)
# print(importance(rf_out))
# }
Mindfulness change
# Add drift rate change regressor (valence x session MAP estimate), ID-guarded
if (all(d1me_shared$ID==mindfulness_changes$ID)) {
mindfulness_changes$dr_change <- d1me_shared$v_val_ctr.sess_ctr_
}
Add scaled variables
# Append z-scored change variables; scale() returns a 1-column matrix, which
# data.frame() stores under the supplied names
mindfulness_changes <-
data.frame(mindfulness_changes,
data.frame(
"aware_ch_sc"=scale(mindfulness_changes$aware_change),
"nj_ch_sc"=scale(mindfulness_changes$nj_change),
"nr_ch_sc"=scale(mindfulness_changes$nr_change))
)
Add the difference in difference endorsements variable to the df with mindfulness changes
# Attach the (post - pre) positive-minus-negative endorsement change, ID-guarded
diff_ends_sub <- pends_p %>% filter(subj_idx %in% mindfulness_changes$ID)
if (all(diff_ends_sub$subj_idx==mindfulness_changes$ID)) mindfulness_changes<- data.frame(mindfulness_changes, "diff_in_diff_posneg"=diff_ends_sub$diff_in_diff_pn_ends)
# Drift-rate change regressed on scaled facet changes (aware x non-judge + non-react)
summary(
change_1m <-
lm(dr_change ~
aware_ch_sc*nj_ch_sc +
nr_ch_sc,
data=mindfulness_changes)
)
##
## Call:
## lm(formula = dr_change ~ aware_ch_sc * nj_ch_sc + nr_ch_sc, data = mindfulness_changes)
##
## Residuals:
## Min 1Q Median 3Q Max
## -0.50841 -0.11252 -0.01161 0.11116 0.46922
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 0.128739 0.020388 6.315 1.07e-08 ***
## aware_ch_sc 0.060368 0.022153 2.725 0.00776 **
## nj_ch_sc 0.025647 0.023174 1.107 0.27143
## nr_ch_sc -0.030873 0.021720 -1.421 0.15872
## aware_ch_sc:nj_ch_sc -0.007739 0.016166 -0.479 0.63332
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.1808 on 88 degrees of freedom
## Multiple R-squared: 0.133, Adjusted R-squared: 0.09361
## F-statistic: 3.375 on 4 and 88 DF, p-value: 0.01283
# Multicollinearity check for the change model
car::vif(change_1m)
## aware_ch_sc nj_ch_sc nr_ch_sc
## 1.381155 1.511342 1.327653
## aware_ch_sc:nj_ch_sc
## 1.034201
# Same predictors, but outcome is the raw endorsement difference-in-differences
summary(
change_raw_m <-
lm(diff_in_diff_posneg ~
aware_ch_sc*nj_ch_sc +
nr_ch_sc,
data=mindfulness_changes)
)
##
## Call:
## lm(formula = diff_in_diff_posneg ~ aware_ch_sc * nj_ch_sc + nr_ch_sc,
## data = mindfulness_changes)
##
## Residuals:
## Min 1Q Median 3Q Max
## -0.53084 -0.08525 -0.00655 0.09687 0.47572
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 0.070107 0.017923 3.912 0.00018 ***
## aware_ch_sc 0.043281 0.019475 2.222 0.02882 *
## nj_ch_sc 0.008860 0.020372 0.435 0.66470
## nr_ch_sc -0.003591 0.019094 -0.188 0.85124
## aware_ch_sc:nj_ch_sc 0.007342 0.014212 0.517 0.60672
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.1589 on 88 degrees of freedom
## Multiple R-squared: 0.08538, Adjusted R-squared: 0.04381
## F-statistic: 2.054 on 4 and 88 DF, p-value: 0.09373
# Multicollinearity check for the raw-endorsement change model
car::vif(change_raw_m)
## aware_ch_sc nj_ch_sc nr_ch_sc
## 1.381155 1.511342 1.327653
## aware_ch_sc:nj_ch_sc
## 1.034201
# Capture the summary for the coefficient plot constructed next
out_2 <- summary(change_1m)
out_2
##
## Call:
## lm(formula = dr_change ~ aware_ch_sc * nj_ch_sc + nr_ch_sc, data = mindfulness_changes)
##
## Residuals:
## Min 1Q Median 3Q Max
## -0.50841 -0.11252 -0.01161 0.11116 0.46922
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 0.128739 0.020388 6.315 1.07e-08 ***
## aware_ch_sc 0.060368 0.022153 2.725 0.00776 **
## nj_ch_sc 0.025647 0.023174 1.107 0.27143
## nr_ch_sc -0.030873 0.021720 -1.421 0.15872
## aware_ch_sc:nj_ch_sc -0.007739 0.016166 -0.479 0.63332
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.1808 on 88 degrees of freedom
## Multiple R-squared: 0.133, Adjusted R-squared: 0.09361
## F-statistic: 3.375 on 4 and 88 DF, p-value: 0.01283
# Coefficient table (rows 2:5 skip the intercept; order matches the labels)
reg_df_2 <- data.table(c("aware-change", "non-judge-change", "non-react-change", "aware*non-judge-change"),
out_2$coefficients[2:5, 1],
out_2$coefficients[2:5, 2]) %>% setNames(c("coefficient", "estimate", "se"))
# Fix display order on the x axis
reg_df_2$coefficient <- factor(reg_df_2$coefficient, levels=c("aware-change", "non-judge-change", "non-react-change", "aware*non-judge-change"))
# Point-and-error-bar plot of the change-model coefficients (±1 SEM)
mr_2 <- ggplot(reg_df_2, aes(x=coefficient, y=estimate)) +
geom_errorbar(aes(ymin=estimate-se, ymax=estimate+se), color="gray57", size=2, width=0) +
geom_point(fill="black", color="black", alpha=.3, size=8, pch=21) +
geom_hline(yintercept=c(0), linetype="dotted") +
theme(axis.text.x = element_text(angle=25, hjust=1)) +
ga + ap + ylab("coefficients (±1 SEM)") + xlab("") +
ggtitle(paste0("Regression coefficients \n of pos > neg drift rate * time ~ FFMQ mindfulness facets")) +
theme(plot.title = element_text(margin=margin(b=5),
size=27, hjust=.15, vjust=.6)) +
geom_text(x=1, y=.092, size=15, label="**") + ylim(-.08, .10) +
theme(axis.text = element_text(size=30),
axis.title = element_text(size=30)) +
scale_x_discrete(labels=c("aware-change" = "aware", "non-judge-change" = "non-judge",
"non-react-change" = "non-react", "aware*non-judge-change" = "aware*non-judge"))
mr_2
#ggsave("../paper/figs/mindful_regr_change.png", mr_2, width=13, height=6)
Robustness check - add treatment condition
# Attach treatment condition (OM / FA / MBCT), asserting ID alignment
assign_short <- extra_df_full %>% filter(ID %in% mindfulness_changes$ID)
testit::assert(assign_short$ID == mindfulness_changes$ID)
mindfulness_changes$tx_cond <- assign_short$Treatment
mindfulness_changes$tx_cond_f <- factor(mindfulness_changes$tx_cond, levels=c("OM", "FA", "MBCT"))
# Sum-to-zero contrasts so condition effects are deviations from the grand mean
contrasts(mindfulness_changes$tx_cond_f) <- contr.sum(3)
#mindfulness_changes$tx_cond_f
# Nested model: facet-change slopes estimated separately within each condition
summary(
change_1m_tx <-
lm(dr_change ~ tx_cond_f/(
scale(aware_change)*scale(nj_change) +
scale(nr_change)),
data=mindfulness_changes))
##
## Call:
## lm(formula = dr_change ~ tx_cond_f/(scale(aware_change) * scale(nj_change) +
## scale(nr_change)), data = mindfulness_changes)
##
## Residuals:
## Min 1Q Median 3Q Max
## -0.44785 -0.08492 -0.00807 0.10600 0.42981
##
## Coefficients:
## Estimate Std. Error t value
## (Intercept) 0.121422 0.019360 6.272
## tx_cond_f1 -0.044635 0.027901 -1.600
## tx_cond_f2 -0.001769 0.026902 -0.066
## tx_cond_fOM:scale(aware_change) 0.108505 0.040931 2.651
## tx_cond_fFA:scale(aware_change) -0.009815 0.036175 -0.271
## tx_cond_fMBCT:scale(aware_change) 0.134140 0.034059 3.938
## tx_cond_fOM:scale(nj_change) -0.002977 0.041566 -0.072
## tx_cond_fFA:scale(nj_change) 0.052491 0.035741 1.469
## tx_cond_fMBCT:scale(nj_change) 0.002369 0.043907 0.054
## tx_cond_fOM:scale(nr_change) -0.033392 0.043222 -0.773
## tx_cond_fFA:scale(nr_change) -0.005319 0.026502 -0.201
## tx_cond_fMBCT:scale(nr_change) -0.018680 0.054729 -0.341
## tx_cond_fOM:scale(aware_change):scale(nj_change) -0.055346 0.024277 -2.280
## tx_cond_fFA:scale(aware_change):scale(nj_change) 0.028655 0.033664 0.851
## tx_cond_fMBCT:scale(aware_change):scale(nj_change) 0.045439 0.029203 1.556
## Pr(>|t|)
## (Intercept) 1.84e-08 ***
## tx_cond_f1 0.113688
## tx_cond_f2 0.947753
## tx_cond_fOM:scale(aware_change) 0.009718 **
## tx_cond_fFA:scale(aware_change) 0.786868
## tx_cond_fMBCT:scale(aware_change) 0.000177 ***
## tx_cond_fOM:scale(nj_change) 0.943078
## tx_cond_fFA:scale(nj_change) 0.145954
## tx_cond_fMBCT:scale(nj_change) 0.957113
## tx_cond_fOM:scale(nr_change) 0.442115
## tx_cond_fFA:scale(nr_change) 0.841443
## tx_cond_fMBCT:scale(nr_change) 0.733780
## tx_cond_fOM:scale(aware_change):scale(nj_change) 0.025354 *
## tx_cond_fFA:scale(aware_change):scale(nj_change) 0.397266
## tx_cond_fMBCT:scale(aware_change):scale(nj_change) 0.123763
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.1664 on 78 degrees of freedom
## Multiple R-squared: 0.3493, Adjusted R-squared: 0.2326
## F-statistic: 2.991 on 14 and 78 DF, p-value: 0.001055
Robustness check: Add observe change
# Robustness: add the observe facet change as an additional covariate
summary(
change_0m <-
lm(dr_change ~
scale(aware_change)*scale(nj_change) +
scale(nr_change) +
scale(obs_change),
data=mindfulness_changes)
)
##
## Call:
## lm(formula = dr_change ~ scale(aware_change) * scale(nj_change) +
## scale(nr_change) + scale(obs_change), data = mindfulness_changes)
##
## Residuals:
## Min 1Q Median 3Q Max
## -0.51231 -0.11970 -0.00832 0.11091 0.47367
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 0.128903 0.020503 6.287 1.25e-08 ***
## scale(aware_change) 0.059524 0.022465 2.650 0.00957 **
## scale(nj_change) 0.026254 0.023393 1.122 0.26482
## scale(nr_change) -0.034735 0.025688 -1.352 0.17981
## scale(obs_change) 0.006682 0.023414 0.285 0.77603
## scale(aware_change):scale(nj_change) -0.008070 0.016293 -0.495 0.62162
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.1818 on 87 degrees of freedom
## Multiple R-squared: 0.1338, Adjusted R-squared: 0.08405
## F-statistic: 2.688 on 5 and 87 DF, p-value: 0.02624
# Multicollinearity check for the observe-augmented model
car::vif(change_0m)
## scale(aware_change) scale(nj_change)
## 1.405536 1.523951
## scale(nr_change) scale(obs_change)
## 1.837643 1.526737
## scale(aware_change):scale(nj_change)
## 1.039469
# Robustness: also allow an observe x non-judge interaction.
# NOTE(review): this overwrites the earlier change_1m; out_2 / mr_2 above
# already captured the original model before this reassignment.
summary(
change_1m <-
lm(dr_change ~
scale(aware_change)*scale(nj_change) +
scale(nr_change) +
scale(obs_change)*scale(nj_change),
data=mindfulness_changes)
)
##
## Call:
## lm(formula = dr_change ~ scale(aware_change) * scale(nj_change) +
## scale(nr_change) + scale(obs_change) * scale(nj_change),
## data = mindfulness_changes)
##
## Residuals:
## Min 1Q Median 3Q Max
## -0.51286 -0.11085 -0.00314 0.11083 0.47533
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 0.130476 0.020740 6.291 1.27e-08 ***
## scale(aware_change) 0.059942 0.022558 2.657 0.00939 **
## scale(nj_change) 0.027343 0.023546 1.161 0.24875
## scale(nr_change) -0.039025 0.026730 -1.460 0.14795
## scale(obs_change) 0.007449 0.023533 0.317 0.75237
## scale(aware_change):scale(nj_change) -0.006046 0.016688 -0.362 0.71801
## scale(nj_change):scale(obs_change) -0.011261 0.018533 -0.608 0.54505
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.1824 on 86 degrees of freedom
## Multiple R-squared: 0.1375, Adjusted R-squared: 0.07736
## F-statistic: 2.286 on 6 and 86 DF, p-value: 0.0428
# Multicollinearity check for the two-interaction model
car::vif(change_1m)
## scale(aware_change) scale(nj_change)
## 1.406847 1.532839
## scale(nr_change) scale(obs_change)
## 1.975434 1.531142
## scale(aware_change):scale(nj_change) scale(nj_change):scale(obs_change)
## 1.082606 1.156170
Robustness checks: Holds in univariate model
# Univariate: drift-rate change vs. aware change
cor.test(mindfulness_changes$dr_change, mindfulness_changes$aware_change)
##
## Pearson's product-moment correlation
##
## data: mindfulness_changes$dr_change and mindfulness_changes$aware_change
## t = 3.3249, df = 91, p-value = 0.001276
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.1344312 0.4993550
## sample estimates:
## cor
## 0.3291264
Depression change
# Post-minus-pre DASS depression change, restricted to IDs present at both sessions
dd1_shared <- dd1 %>% filter(ID %in% dd2$ID)
dd2_shared <- dd2 %>% filter(ID %in% dd1$ID)
if (all(dd1_shared$ID==dd2_shared$ID)) {
depr_changes <- data.frame(dd1_shared$ID,
dd2_shared$dass_sum_score-dd1_shared$dass_sum_score) %>%
setNames(c("ID", "depr_change"))
}
# Eyeball the change distribution
hist(depr_changes$depr_change)
Subset MAP estimates…
# MAP estimates for participants with depression change scores
d1me_shared_2 <- d1me %>% filter(ID %in% depr_changes$ID)
… and endorsement changes
# Endorsement difference-in-differences for the same participants
diff_ch_subs_d <- pends_p %>% filter(subj_idx %in% depr_changes$ID)
Depression change
# Add drift rate change regressor
if (all(d1me_shared_2$ID==depr_changes$ID)) {
depr_changes$dr_change <- d1me_shared_2$v_val_ctr.sess_ctr_
}
# Add endorsement change (copied comment above said "drift rate" — this one is the
# positive-minus-negative endorsement difference-in-differences)
if (all(diff_ch_subs_d$subj_idx==depr_changes$ID)) {
depr_changes$ends_change <- diff_ch_subs_d$diff_in_diff_pn_ends
}
# Drift-rate change vs. depression change
cor.test(depr_changes$dr_change, depr_changes$depr_change)
##
## Pearson's product-moment correlation
##
## data: depr_changes$dr_change and depr_changes$depr_change
## t = -2.0458, df = 91, p-value = 0.04367
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.396459540 -0.006244396
## sample estimates:
## cor
## -0.2096859
# Endorsement change vs. depression change
cor.test(depr_changes$ends_change, depr_changes$depr_change)
##
## Pearson's product-moment correlation
##
## data: depr_changes$ends_change and depr_changes$depr_change
## t = -1.8054, df = 91, p-value = 0.07431
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.37544557 0.01844734
## sample estimates:
## cor
## -0.1859598
# Attach treatment condition to the depression-change df, asserting ID alignment
assign_short_2 <- extra_df_full %>% filter(ID %in% depr_changes$ID)
testit::assert(assign_short_2$ID == depr_changes$ID)
depr_changes$tx_cond <- assign_short_2$Treatment
depr_changes$tx_cond_f <- factor(depr_changes$tx_cond, levels=c("OM", "FA", "MBCT"))
# Sum-to-zero contrasts, as in the mindfulness-change analysis
contrasts(depr_changes$tx_cond_f) <- contr.sum(3)
Check dependence on treatment condition
# NOTE(review): this reassigns m1, which earlier in the file held the baseline
# FFMQ questionnaire df (used at the mindfulness_changes construction) — consider
# a distinct name if m1 is needed again later
summary(m1 <- lm(dr_change ~ depr_change, data=depr_changes))
##
## Call:
## lm(formula = dr_change ~ depr_change, data = depr_changes)
##
## Residuals:
## Min 1Q Median 3Q Max
## -0.56935 -0.12910 -0.00318 0.12334 0.41858
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 0.090890 0.025520 3.561 0.000589 ***
## depr_change -0.006563 0.003208 -2.046 0.043667 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.1867 on 91 degrees of freedom
## Multiple R-squared: 0.04397, Adjusted R-squared: 0.03346
## F-statistic: 4.185 on 1 and 91 DF, p-value: 0.04367
# Per-condition depression-change slopes (nested); reassigns m1 again
summary(m1 <- lm(dr_change ~ tx_cond_f/depr_change, data=depr_changes))
##
## Call:
## lm(formula = dr_change ~ tx_cond_f/depr_change, data = depr_changes)
##
## Residuals:
## Min 1Q Median 3Q Max
## -0.39715 -0.10945 -0.00604 0.10149 0.41000
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 0.096127 0.025238 3.809 0.00026 ***
## tx_cond_f1 -0.026327 0.037552 -0.701 0.48513
## tx_cond_f2 -0.006310 0.034210 -0.184 0.85410
## tx_cond_fOM:depr_change 0.002326 0.006300 0.369 0.71287
## tx_cond_fFA:depr_change -0.011286 0.006154 -1.834 0.07007 .
## tx_cond_fMBCT:depr_change -0.008598 0.004496 -1.912 0.05912 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.1808 on 87 degrees of freedom
## Multiple R-squared: 0.143, Adjusted R-squared: 0.09377
## F-statistic: 2.904 on 5 and 87 DF, p-value: 0.01796
# Scatter of depression change vs. drift-rate-change estimates, with lm fit
dc_vvs <- ggplot(depr_changes, aes(dr_change, depr_change)) +
geom_point(size=8, pch=21, alpha=.5, fill="gray57") +
#ggtitle(paste0( "r=-.30 p < .001 ")) +
#geom_smooth(method="lm", color="gray57") +
ga + ap +
geom_smooth(method="lm", color="gray57", alpha=.5) +
ga + theme(plot.title = element_text(margin=margin(b=-3),
size=30, hjust=.15, vjust=.6)) + theme(axis.title = element_text(size=35), axis.text=element_text(size=35)) +
ylab(" depression \n (via DASS)") + xlab("positive > negative \n drift rate * time estimates") +
theme(plot.title = element_text(margin=margin(b=5),
size=27, hjust=.15, vjust=.6)) + xlim(-.45, .75) +
geom_text(x=.55, y=0, size=15, label="*") #+ ylim(0, 37)
dc_vvs
## `geom_smooth()` using formula 'y ~ x'
#ggsave("../paper/figs/depr-change_v-val_sess.png", dc_vvs, width=9, height=7)
Robustness check: Adding depression as covariate in mindfulness model
# Merge depression change into the mindfulness-change df (ID-guarded),
# then refit the facet-change model with depression change as covariate
if (all(mindfulness_changes$ID==depr_changes$ID)) {
mindfulness_changes <- data.frame(mindfulness_changes, depr_changes$depr_change)
}
summary(
change_2m <-
lm(dr_change ~
scale(aware_change)*scale(nj_change) +
scale(nr_change) + scale(depr_changes.depr_change),
data=mindfulness_changes)
)
##
## Call:
## lm(formula = dr_change ~ scale(aware_change) * scale(nj_change) +
## scale(nr_change) + scale(depr_changes.depr_change), data = mindfulness_changes)
##
## Residuals:
## Min 1Q Median 3Q Max
## -0.53744 -0.11422 -0.00769 0.11486 0.44203
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 0.128345 0.020440 6.279 1.3e-08 ***
## scale(aware_change) 0.057527 0.022503 2.556 0.0123 *
## scale(nj_change) 0.019432 0.024566 0.791 0.4311
## scale(nr_change) -0.028330 0.022014 -1.287 0.2015
## scale(depr_changes.depr_change) -0.016340 0.021043 -0.777 0.4395
## scale(aware_change):scale(nj_change) -0.006945 0.016235 -0.428 0.6699
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.1812 on 87 degrees of freedom
## Multiple R-squared: 0.139, Adjusted R-squared: 0.08951
## F-statistic: 2.809 on 5 and 87 DF, p-value: 0.02123
# Depression change vs. aware change
# NOTE(review): the same correlation is recomputed below with arguments swapped
cor.test(mindfulness_changes$depr_changes.depr_change, mindfulness_changes$aware_change)
##
## Pearson's product-moment correlation
##
## data: mindfulness_changes$depr_changes.depr_change and mindfulness_changes$aware_change
## t = -3.1211, df = 91, p-value = 0.002415
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.4840102 -0.1145071
## sample estimates:
## cor
## -0.3109618
Relationship between change in different mindfulness facets and depression change
# Aware change vs. depression change
# NOTE(review): duplicates the correlation computed just above (arguments swapped)
cor.test(mindfulness_changes$aware_change, mindfulness_changes$depr_changes.depr_change)
##
## Pearson's product-moment correlation
##
## data: mindfulness_changes$aware_change and mindfulness_changes$depr_changes.depr_change
## t = -3.1211, df = 91, p-value = 0.002415
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.4840102 -0.1145071
## sample estimates:
## cor
## -0.3109618
# Non-judge change vs. depression change
cor.test(mindfulness_changes$nj_change, mindfulness_changes$depr_changes.depr_change)
##
## Pearson's product-moment correlation
##
## data: mindfulness_changes$nj_change and mindfulness_changes$depr_changes.depr_change
## t = -4.1273, df = 91, p-value = 8.117e-05
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.5558350 -0.2103992
## sample estimates:
## cor
## -0.3970883
# Non-react change vs. depression change
cor.test(mindfulness_changes$nr_change, mindfulness_changes$depr_changes.depr_change)
##
## Pearson's product-moment correlation
##
## data: mindfulness_changes$nr_change and mindfulness_changes$depr_changes.depr_change
## t = -0.82196, df = 91, p-value = 0.4132
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.2845783 0.1199595
## sample estimates:
## cor
## -0.08584661
Robustness check: Depression and DDM – still relevant?
# cor.test(mindfulness_changes$dr_change_ddm,
# mindfulness_changes$depr_changes.depr_change)
These are also too big to put in Public_Data
# Load DDM trace summaries and split-half/test-retest behavioral files
# (paths relative to this script; files too large for Public_Data per note above)
even_ddm <- read.csv("../../model_res/traces/DDM_split_half_even_wtrialwise_poutlier057074.csv")
odd_ddm <- read.csv("../../model_res/traces/DDM_split_half_odd__wtrialwise_poutlier052866.csv")
pre_t_d <- read.csv("./../../model_res/traces/DDM_test_retest_PRE_s-val_wtrialwise_poutlier059850.csv")
post_t_d <- read.csv("./../../model_res/traces/DDM_test_retest_POST_s-val_wtrialwise_poutlier055680.csv")
even_emp <- read.csv("../../data/cleaned_files/s_bdf_even.csv")
odd_emp <- read.csv("../../data/cleaned_files/s_bdf_odd.csv")
# even_ddm <- read.csv("../../model_res/traces/DDM_split_half_even2558.csv")
# odd_ddm <- read.csv("../../model_res/traces/DDM_split_half_odd3780.csv")
# Compute the ICC2 (single random raters) estimate and its 95% CI bounds for two
# paired measurement vectors, returned as a one-row data.table.
GetICCData <- function(vec1, vec2) {
  icc_tab <- psych::ICC(data.table(vec1, vec2))$results
  # Row 2 of the psych::ICC results table is ICC2 (Single_random_raters)
  data.table(
    "ICC" = icc_tab$ICC[2],
    "95_ci_lb" = icc_tab$`lower bound`[2],
    "95_ci_ub" = icc_tab$`upper bound`[2]
  )
}
Behavioral effects
Split-half
# Per-subject mean RTs for even/odd trial halves: overall, then by valence
even_rt <- even_emp %>%
group_by(subj_idx) %>% summarize_at(c("rt"), mean, na.rm=TRUE)
odd_rt <- odd_emp %>%
group_by(subj_idx) %>% summarize_at(c("rt"), mean, na.rm=TRUE)
even_rt_n <- even_emp %>% filter(valence=="negative") %>%
group_by(subj_idx) %>% summarize_at(c("rt"), mean, na.rm=TRUE)
odd_rt_n <- odd_emp %>% filter(valence=="negative") %>%
group_by(subj_idx) %>% summarize_at(c("rt"), mean, na.rm=TRUE)
even_rt_p <- even_emp %>% filter(valence=="positive") %>%
group_by(subj_idx) %>% summarize_at(c("rt"), mean, na.rm=TRUE)
odd_rt_p <- odd_emp %>% filter(valence=="positive") %>%
group_by(subj_idx) %>% summarize_at(c("rt"), mean, na.rm=TRUE)
Calculate valence rt diffs
# Split-half reliability of the positive-minus-negative RT difference.
# FIX: inside a braced block only the last expression auto-prints at top level,
# so the original cor.test result was computed and silently discarded; wrap it
# in print() so both the correlation and the ICC are displayed.
if (all(even_rt_n$subj_idx==odd_rt_n$subj_idx)) {
  even_v_rt_diff <- even_rt_p$rt - even_rt_n$rt
  odd_v_rt_diff <- odd_rt_p$rt - odd_rt_n$rt
  print(cor.test(even_v_rt_diff, odd_v_rt_diff))
  # Last expression of the if block: auto-printed, as before
  psych::ICC(data.table(even_v_rt_diff, odd_v_rt_diff))
}
## boundary (singular) fit: see help('isSingular')
## Call: psych::ICC(x = data.table(even_v_rt_diff, odd_v_rt_diff))
##
## Intraclass correlation coefficients
## type ICC F df1 df2 p lower bound upper bound
## Single_raters_absolute ICC1 0.36 2.1 96 97 0.00013 0.21 0.50
## Single_random_raters ICC2 0.36 2.1 96 96 0.00013 0.21 0.50
## Single_fixed_raters ICC3 0.36 2.1 96 96 0.00013 0.21 0.50
## Average_raters_absolute ICC1k 0.53 2.1 96 97 0.00013 0.34 0.66
## Average_random_raters ICC2k 0.53 2.1 96 96 0.00013 0.34 0.66
## Average_fixed_raters ICC3k 0.53 2.1 96 96 0.00013 0.34 0.66
##
## Number of subjects = 97 Number of Judges = 2
## See the help file for a discussion of the other 4 McGraw and Wong estimates,
# Per-subject mean endorsement rates by valence for even/odd halves, and their
# positive-minus-negative differences.
# NOTE(review): unlike the RT block above, no subj_idx alignment check is done
# before subtracting — confirm even/odd summaries share identical subject order
even_pends <- even_emp %>% filter( valence=="positive") %>%
group_by(subj_idx) %>% summarize_at(c("response"), mean, na.rm=TRUE)
even_nends <- even_emp %>% filter(valence=="negative") %>%
group_by(subj_idx) %>% summarize_at(c("response"), mean, na.rm=TRUE)
even_pos_min_neg <- even_pends$response - even_nends$response
odd_pends <- odd_emp %>% filter(valence=="positive") %>%
group_by(subj_idx) %>% summarize_at(c("response"), mean, na.rm=TRUE)
odd_nends <- odd_emp %>% filter(valence=="negative") %>%
group_by(subj_idx) %>% summarize_at(c("response"), mean, na.rm=TRUE)
odd_pos_min_neg <- odd_pends$response - odd_nends$response
Test-retest
# Per-subject mean RTs for pre/post sessions: overall, then by valence
pre_rt <- bx_df %>% filter(session=="Pre") %>%
group_by(subj_idx) %>% summarize_at(c("rt"), mean, na.rm=TRUE)
post_rt <- bx_df %>% filter(session=="Post") %>%
group_by(subj_idx) %>% summarize_at(c("rt"), mean, na.rm=TRUE)
pre_rt_n <- bx_df %>% filter(session=="Pre", valence=="negative") %>%
group_by(subj_idx) %>% summarize_at(c("rt"), mean, na.rm=TRUE)
post_rt_n <- bx_df %>% filter(session=="Post", valence=="negative") %>%
group_by(subj_idx) %>% summarize_at(c("rt"), mean, na.rm=TRUE)
pre_rt_p <- bx_df %>% filter(session=="Pre", valence=="positive") %>%
group_by(subj_idx) %>% summarize_at(c("rt"), mean, na.rm=TRUE)
post_rt_p <- bx_df %>% filter(session=="Post", valence=="positive") %>%
group_by(subj_idx) %>% summarize_at(c("rt"), mean, na.rm=TRUE)
# Test-retest reliability of overall mean RT.
# FIX: in the original, the `;` terminated the if statement, so psych::ICC ran
# even when the subject-alignment check failed. Braces make both the correlation
# and the ICC conditional; print() keeps the cor.test output visible.
if (all(pre_rt$subj_idx == post_rt$subj_idx)) {
  print(cor.test(pre_rt$rt, post_rt$rt))
  psych::ICC(data.table(pre_rt$rt, post_rt$rt))
}
##
## Pearson's product-moment correlation
##
## data: pre_rt$rt and post_rt$rt
## t = 8.6151, df = 94, p-value = 1.611e-13
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.5349965 0.7631006
## sample estimates:
## cor
## 0.6642369
## Call: psych::ICC(x = data.table(pre_rt$rt, post_rt$rt))
##
## Intraclass correlation coefficients
## type ICC F df1 df2 p lower bound upper bound
## Single_raters_absolute ICC1 0.63 4.4 95 96 2.4e-12 0.51 0.72
## Single_random_raters ICC2 0.64 4.9 95 95 7.0e-14 0.50 0.73
## Single_fixed_raters ICC3 0.66 4.9 95 95 7.0e-14 0.56 0.75
## Average_raters_absolute ICC1k 0.77 4.4 95 96 2.4e-12 0.68 0.84
## Average_random_raters ICC2k 0.78 4.9 95 95 7.0e-14 0.67 0.85
## Average_fixed_raters ICC3k 0.80 4.9 95 95 7.0e-14 0.72 0.86
##
## Number of subjects = 96 Number of Judges = 2
## See the help file for a discussion of the other 4 McGraw and Wong estimates,
if (all(pre_rt_n$subj_idx == post_rt_n$subj_idx)) cor.test(pre_rt_n$rt, post_rt_n$rt); psych::ICC(data.table(pre_rt_n$rt, post_rt_n$rt)); test <- "ok"
##
## Pearson's product-moment correlation
##
## data: pre_rt_n$rt and post_rt_n$rt
## t = 7.7012, df = 94, p-value = 1.346e-11
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.4815411 0.7312768
## sample estimates:
## cor
## 0.6219797
## Call: psych::ICC(x = data.table(pre_rt_n$rt, post_rt_n$rt))
##
## Intraclass correlation coefficients
## type ICC F df1 df2 p lower bound upper bound
## Single_raters_absolute ICC1 0.57 3.7 95 96 3.2e-10 0.45 0.68
## Single_random_raters ICC2 0.59 4.2 95 95 7.0e-12 0.44 0.70
## Single_fixed_raters ICC3 0.62 4.2 95 95 7.0e-12 0.50 0.71
## Average_raters_absolute ICC1k 0.73 3.7 95 96 3.2e-10 0.62 0.81
## Average_random_raters ICC2k 0.74 4.2 95 95 7.0e-12 0.61 0.82
## Average_fixed_raters ICC3k 0.76 4.2 95 95 7.0e-12 0.67 0.83
##
## Number of subjects = 96 Number of Judges = 2
## See the help file for a discussion of the other 4 McGraw and Wong estimates,
if (all(pre_rt_p$subj_idx == post_rt_p$subj_idx)) cor.test(pre_rt_p$rt, post_rt_p$rt); psych::ICC(data.table(pre_rt_p$rt, post_rt_p$rt)); test2 <- "ok"
##
## Pearson's product-moment correlation
##
## data: pre_rt_p$rt and post_rt_p$rt
## t = 8.628, df = 94, p-value = 1.513e-13
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.5357038 0.7635144
## sample estimates:
## cor
## 0.6647905
## Call: psych::ICC(x = data.table(pre_rt_p$rt, post_rt_p$rt))
##
## Intraclass correlation coefficients
## type ICC F df1 df2 p lower bound upper bound
## Single_raters_absolute ICC1 0.64 4.6 95 96 5.4e-13 0.53 0.73
## Single_random_raters ICC2 0.65 4.9 95 95 6.9e-14 0.53 0.74
## Single_fixed_raters ICC3 0.66 4.9 95 95 6.9e-14 0.56 0.75
## Average_raters_absolute ICC1k 0.78 4.6 95 96 5.4e-13 0.69 0.84
## Average_random_raters ICC2k 0.79 4.9 95 95 6.9e-14 0.69 0.85
## Average_fixed_raters ICC3k 0.80 4.9 95 95 6.9e-14 0.72 0.86
##
## Number of subjects = 96 Number of Judges = 2
## See the help file for a discussion of the other 4 McGraw and Wong estimates,
Calculate valence RT differences. Reliability is a bit worse for the difference score, but even so it's not bad.
# Test-retest reliability of the valence RT difference (positive minus negative).
# BUG FIX: inside `{}` only the last expression auto-prints, so the original
# silently discarded the cor.test() result (the rendered output below shows
# only the ICC). Wrap both analyses in print() so both appear.
if (test == "ok" && test2 == "ok") {
  pre_v_rt_diff <- pre_rt_p$rt - pre_rt_n$rt
  post_v_rt_diff <- post_rt_p$rt - post_rt_n$rt
  print(cor.test(pre_v_rt_diff, post_v_rt_diff))
  print(psych::ICC(data.table(pre_v_rt_diff, post_v_rt_diff)))
} else {
  cat("Error with data alignment—see above cell.")
}
## Call: psych::ICC(x = data.table(pre_v_rt_diff, post_v_rt_diff))
##
## Intraclass correlation coefficients
## type ICC F df1 df2 p lower bound upper bound
## Single_raters_absolute ICC1 0.42 2.5 95 96 7.7e-06 0.27 0.55
## Single_random_raters ICC2 0.42 2.5 95 95 7.2e-06 0.28 0.55
## Single_fixed_raters ICC3 0.42 2.5 95 95 7.2e-06 0.28 0.55
## Average_raters_absolute ICC1k 0.59 2.5 95 96 7.7e-06 0.43 0.71
## Average_random_raters ICC2k 0.59 2.5 95 95 7.2e-06 0.43 0.71
## Average_fixed_raters ICC3k 0.60 2.5 95 95 7.2e-06 0.43 0.71
##
## Number of subjects = 96 Number of Judges = 2
## See the help file for a discussion of the other 4 McGraw and Wong estimates,
# Per-subject endorsement rates by session and valence, plus the
# positive-minus-negative difference within each session.
pre_pends <- bx_df %>%
  filter(session == "Pre", valence == "positive") %>%
  group_by(subj_idx) %>%
  summarize(response = mean(response, na.rm = TRUE))
pre_nends <- bx_df %>%
  filter(session == "Pre", valence == "negative") %>%
  group_by(subj_idx) %>%
  summarize(response = mean(response, na.rm = TRUE))
pre_pos_min_neg <- pre_pends$response - pre_nends$response
post_pends <- bx_df %>%
  filter(session == "Post", valence == "positive") %>%
  group_by(subj_idx) %>%
  summarize(response = mean(response, na.rm = TRUE))
post_nends <- bx_df %>%
  filter(session == "Post", valence == "negative") %>%
  group_by(subj_idx) %>%
  summarize(response = mean(response, na.rm = TRUE))
post_pos_min_neg <- post_pends$response - post_nends$response
# post_pends <- bx_df %>% filter(session=="Post", valence=="positive") %>%
# group_by(subj_idx) %>% summarize_at(c("response"), mean, na.rm=TRUE)
#
# pre_pends <- bx_df %>% filter(session=="Pre", valence=="positive") %>%
# group_by(subj_idx) %>% summarize_at(c("response"), mean, na.rm=TRUE)
#
# post_pends <- bx_df %>% filter(session=="Post", valence=="positive") %>%
# group_by(subj_idx) %>% summarize_at(c("response"), mean, na.rm=TRUE)
Following Hedge et al. (2017), using ICC(2,1)
Reliabilities were calculated using Intraclass Correlation Coefficients (ICC) using a two-way random effects model for absolute agreement. In the commonly cited Shrout and Fleiss (1979; see also McGraw & Wong, 1996) nomenclature, this corresponds to ICC (2,1). This form of the ICC is sensitive to differences between session means. In Supplementary Material A, we perform further analyses to account for potential outliers and distributional assumptions. The choice of statistic does not affect our conclusions. We report reliabilities separately for Studies 1 and 2 in the main text so that consistency across samples can be observed. We combine the studies in supplementary analyses.
Split half
# Split-half ICCs for the model-free (behavioral) measures.
# Each row: ICC point estimate, 95% CI bounds, and a display label.
mf_ICCs_sh <- rbind(
  setNames(data.frame(GetICCData(even_rt$rt, odd_rt$rt),
                      "mean \nreaction time"),
           c("ICC", "ci_lb", "ci_ub", "var")),
  setNames(data.frame(GetICCData(even_v_rt_diff, odd_v_rt_diff),
                      "difference in \nreaction time by valence"),
           c("ICC", "ci_lb", "ci_ub", "var")),
  setNames(data.frame(GetICCData(even_nends$response, odd_nends$response),
                      "proportion neg. endorsements"),
           c("ICC", "ci_lb", "ci_ub", "var")),
  setNames(data.frame(GetICCData(even_pends$response, odd_pends$response),
                      "proportion pos. endorsements"),
           c("ICC", "ci_lb", "ci_ub", "var")),
  setNames(data.frame(GetICCData(even_pos_min_neg, odd_pos_min_neg),
                      "ratio of positive:negative \n endorsements"),
           c("ICC", "ci_lb", "ci_ub", "var"))
)
## boundary (singular) fit: see help('isSingular')
## boundary (singular) fit: see help('isSingular')
## boundary (singular) fit: see help('isSingular')
## boundary (singular) fit: see help('isSingular')
# mf_ICCs_sh$lb <- mf_ICCs_sh$ICC-mf_ICCs_sh$ci_lb
# mf_ICCs_sh$ub <- mf_ICCs_sh$ci_ub - mf_ICCs_sh$ICC
# Tag these rows as split-half before combining with the test-retest table
mf_ICCs_sh[["type"]] <- "split_half"
mf_ICCs_sh
## ICC ci_lb ci_ub var
## 1 0.9155235 0.8837085 0.9389370 mean \nreaction time
## 2 0.3603387 0.2059824 0.4972983 difference in \nreaction time by valence
## 3 0.5495007 0.4214780 0.6561456 proportion neg. endorsements
## 4 0.4546658 0.3115902 0.5776796 proportion pos. endorsements
## 5 0.6119708 0.4958958 0.7066414 ratio of positive:negative \n endorsements
## type
## 1 split_half
## 2 split_half
## 3 split_half
## 4 split_half
## 5 split_half
Test retest
# Test-retest ICCs for the model-free (behavioral) measures.
# Same row layout as the split-half table above: ICC, 95% CI, label.
mf_ICCs_tr <- rbind(
  setNames(data.frame(GetICCData(pre_rt$rt, post_rt$rt),
                      "mean \nreaction time"),
           c("ICC", "ci_lb", "ci_ub", "var")),
  setNames(data.frame(GetICCData(pre_v_rt_diff, post_v_rt_diff),
                      "difference in \nreaction time by valence"),
           c("ICC", "ci_lb", "ci_ub", "var")),
  setNames(data.frame(GetICCData(pre_nends$response, post_nends$response),
                      "proportion neg. endorsements"),
           c("ICC", "ci_lb", "ci_ub", "var")),
  setNames(data.frame(GetICCData(pre_pends$response, post_pends$response),
                      "proportion pos. endorsements"),
           c("ICC", "ci_lb", "ci_ub", "var")),
  setNames(data.frame(GetICCData(pre_pos_min_neg, post_pos_min_neg),
                      "ratio of positive:negative \n endorsements"),
           c("ICC", "ci_lb", "ci_ub", "var"))
)
# mf_ICCs_tr$lb <- mf_ICCs_tr$ICC-mf_ICCs_tr$ci_lb
# mf_ICCs_tr$ub <- mf_ICCs_tr$ci_ub - mf_ICCs_tr$ICC
# Tag these rows as test-retest before combining with the split-half table
mf_ICCs_tr[["type"]] <- "test_retest"
mf_ICCs_tr
## ICC ci_lb ci_ub var
## 1 0.6356479 0.5028770 0.7346966 mean \nreaction time
## 2 0.4233395 0.2758852 0.5515546 difference in \nreaction time by valence
## 3 0.5412898 0.3742585 0.6660359 proportion neg. endorsements
## 4 0.5640652 0.4313549 0.6714392 proportion pos. endorsements
## 5 0.5352843 0.3614335 0.6635753 ratio of positive:negative \n endorsements
## type
## 1 test_retest
## 2 test_retest
## 3 test_retest
## 4 test_retest
## 5 test_retest
# Stack split-half and test-retest behavioral ICCs into one plotting table,
# with a `label` column distinguishing the two reliability types
mf_iccs <- rbind(
  cbind(mf_ICCs_sh, label = "split-half"),
  cbind(mf_ICCs_tr, label = "test-retest")
)
Model-based measures
Split half
DDM
# MAP estimates of subject-level DDM parameters from each split-half fit
d_even_me <- GetMapEsts(GetSubjTraces(even_ddm), format="short")
d_odd_me <- GetMapEsts(GetSubjTraces(odd_ddm), format="short")
# Combine the datasets, suffixing the odd-half columns with "odd".
# ROBUSTNESS FIX: fail loudly on subject-ID misalignment instead of silently
# leaving c_me_d undefined (or stale from an earlier chunk).
if (all(d_even_me$ID == d_odd_me$ID)) {
  c_me_d <- data.table(d_even_me, d_odd_me[2:ncol(d_odd_me)] %>%
  setNames(paste0(names(d_odd_me[2:ncol(d_odd_me)]), "odd")))
} else {
  stop("Subject IDs differ between even- and odd-half DDM estimates.")
}
# Split-half ICCs for subject-level DDM parameters (even vs. odd halves).
# Each row: ICC point estimate, 95% CI bounds, display label.
mb_d_ICCs_sh <- rbind(
  setNames(data.frame(GetICCData(c_me_d$a_, c_me_d$a_odd),
                      "threshold"),
           c("ICC", "ci_lb", "ci_ub", "var")),
  setNames(data.frame(GetICCData(c_me_d$t_, c_me_d$t_odd),
                      "non-decision time"),
           c("ICC", "ci_lb", "ci_ub", "var")),
  # Starting point is passed through InvLogit before computing the ICC;
  # this is the parameter that triggers the singular-fit message and it
  # also has the lowest ICC
  setNames(data.frame(GetICCData(InvLogit(c_me_d$z_), InvLogit(c_me_d$z_odd)),
                      "starting point bias"),
           c("ICC", "ci_lb", "ci_ub", "var")),
  setNames(data.frame(GetICCData(c_me_d$v_Intercept_, c_me_d$v_Intercept_odd),
                      "drift rate - intercept"),
           c("ICC", "ci_lb", "ci_ub", "var")),
  setNames(data.frame(GetICCData(c_me_d$v_val_ctr_, c_me_d$v_val_ctr_odd),
                      "drift rate - pos. vs. neg. \nregressor"),
           c("ICC", "ci_lb", "ci_ub", "var"))
  #data.frame(GetICCData(data.table(even_rt$rt, odd_rt$rt), "RT mean") %>% setNames(c("ICC", "ci_lb", "ci_ub", "var"))
)
## boundary (singular) fit: see help('isSingular')
# mb_d_ICCs_sh$lb <- mb_d_ICCs_sh$ICC-mb_d_ICCs_sh$ci_lb
# mb_d_ICCs_sh$ub <- mb_d_ICCs_sh$ci_ub-mb_d_ICCs_sh$ICC
mb_d_ICCs_sh
## ICC ci_lb ci_ub var
## 1 0.647664393 0.536589071 0.73616986 threshold
## 2 0.777602330 0.701923310 0.83596992 non-decision time
## 3 0.003412813 -0.002956936 0.01331431 starting point bias
## 4 0.396511669 0.220357617 0.54120637 drift rate - intercept
## 5 0.671413324 0.564663987 0.75534506 drift rate - pos. vs. neg. \nregressor
Test retest
DDM
#post_t_d <- read.csv("./../model_res/traces/DDM_test_retest_POST_m0_sret_v_val2062.csv") # Sub in the correct one
# MAP estimates of subject-level DDM parameters from the pre and post fits
d_pre_me <- GetMapEsts(GetSubjTraces(pre_t_d), format="short")
d_post_me <- GetMapEsts(GetSubjTraces(post_t_d), format="short")
# Combine the datasets, suffixing the post-session columns with "post".
# ROBUSTNESS FIX: fail loudly on subject-ID misalignment instead of silently
# leaving c_me_d holding the split-half table built earlier in this script.
if (all(d_pre_me$ID == d_post_me$ID)) {
  c_me_d <- data.table(d_pre_me, d_post_me[2:ncol(d_post_me)] %>%
  setNames(paste0(names(d_post_me[2:ncol(d_post_me)]), "post")))
} else {
  stop("Subject IDs differ between pre- and post-session DDM estimates.")
}
# Test-retest ICCs for subject-level DDM parameters (pre vs. post session).
# Same row layout as the split-half table above.
mb_d_ICCs <- rbind(
  setNames(data.frame(GetICCData(c_me_d$a_, c_me_d$a_post),
                      "threshold"),
           c("ICC", "ci_lb", "ci_ub", "var")),
  setNames(data.frame(GetICCData(c_me_d$t_, c_me_d$t_post),
                      "non-decision time"),
           c("ICC", "ci_lb", "ci_ub", "var")),
  # Starting point is passed through InvLogit before computing the ICC;
  # this is the parameter that triggers the singular-fit message and it
  # also has the lowest ICC
  setNames(data.frame(GetICCData(InvLogit(c_me_d$z_), InvLogit(c_me_d$z_post)),
                      "starting point bias"),
           c("ICC", "ci_lb", "ci_ub", "var")),
  setNames(data.frame(GetICCData(c_me_d$v_Intercept_, c_me_d$v_Intercept_post),
                      "drift rate - intercept"),
           c("ICC", "ci_lb", "ci_ub", "var")),
  setNames(data.frame(GetICCData(c_me_d$v_val_ctr_, c_me_d$v_val_ctr_post),
                      "drift rate - pos. vs. neg. \nregressor"),
           c("ICC", "ci_lb", "ci_ub", "var"))
  #data.frame(GetICCData(data.table(pre_rt$rt, post_rt$rt), "RT mean") %>% setNames(c("ICC", "ci_lb", "ci_ub", "var"))
)
## boundary (singular) fit: see help('isSingular')
# mb_d_ICCs$lb <- mb_d_ICCs$ICC-mb_d_ICCs$ci_lb
# mb_d_ICCs$ub <- mb_d_ICCs$ci_ub-mb_d_ICCs$ICC
# Stack split-half and test-retest DDM ICCs into one plotting table,
# with a `label` column distinguishing the two reliability types
ddm_iccs <- rbind(
  cbind(mb_d_ICCs_sh, label = "split-half"),
  cbind(mb_d_ICCs, label = "test-retest")
)
# Behavioral (model-free) ICC panel: point estimates with 95% CI bars,
# split-half vs. test-retest dodged side by side per measure.
# BUG FIX: alpha was specified twice in geom_point() (`alpha=.9, alpha=.9`),
# which triggered "Duplicated aesthetics after name standardisation: alpha";
# the duplicate is removed so the plot builds cleanly.
mf_p <- ggplot(mf_iccs, aes(x=var, y=ICC, group=label, fill=label)) +
geom_errorbar(aes(ymin=ci_lb, ymax=ci_ub), position=position_dodge(width=.4), size=2, width=0) +
# Reference lines at quartile ICC values
geom_hline(yintercept=c(0, .25, .5, .75, 1)) +
geom_point(alpha=.9, size=8, pch=21, position=position_dodge(width=.4)) +
theme(axis.text.x = element_text(angle=25, hjust=1, size=16)) +
# ga/ap/lp are shared plot-parameter objects set earlier in the document
ga + ap + ylab("") + ylim(-.1, 1) + xlab("") +
theme(plot.title = element_text(margin=margin(b=-25),
size=30, hjust=.15, vjust=.6)) +
scale_fill_manual(values=c("grey24", "gray65")) + lp
# ggtitle("Behavioral measures") +
# theme(plot.title = element_text(margin=margin(b=-25),
# size=30, hjust=.15, vjust=.6))
#mf_p
# Model-based (DDM) ICC panel, mirroring the behavioral panel above but
# with a different fill palette.
# BUG FIX: alpha was specified twice in geom_point() (`alpha=.9, alpha=.9`),
# which triggered "Duplicated aesthetics after name standardisation: alpha";
# the duplicate is removed so the plot builds cleanly.
mb_p <- ggplot(ddm_iccs, aes(x=var, y=ICC, group=label, fill=label)) +
geom_errorbar(aes(ymin=ci_lb, ymax=ci_ub), position=position_dodge(width=.4), size=2, width=0) +
# Reference lines at quartile ICC values
geom_hline(yintercept=c(0, .25, .5, .75, 1)) +
geom_point(alpha=.9, size=8, pch=21, position=position_dodge(width=.4)) +
theme(axis.text.x = element_text(angle=25, hjust=1, size=16)) +
# ga/ap/lp are shared plot-parameter objects set earlier in the document
ga + ap + ylab("") + ylim(-.1, 1) + xlab("") +
theme(plot.title = element_text(margin=margin(b=-25),
size=30, hjust=.15, vjust=.6)) +
scale_fill_manual(values=c("tan1", "chocolate")) + lp
# Stack the behavioral panel over the model-based panel (patchwork `/`)
# and attach a shared title and subtitle to the composite figure.
iccs_p <- mf_p / mb_p +
  plot_annotation(
    title = "Stability of behavioral (top) and model-based (bottom) measures",
    subtitle = "ICC point estimates and 95% CIs",
    theme = theme(
      plot.title = element_text(size = 25, hjust = .5),
      plot.subtitle = element_text(size = 20, hjust = .05)
    )
  )
# Render the combined figure
iccs_p
#ggsave("../paper/figs/icc_plot.png", iccs_p, width=11, height=10)
3.25 - Note: after the R upgrade, changes to factor-ordering rules must have led to a change in the x-axis order compared to the version of this figure in the paper; results were spot-checked to confirm they are unchanged.